From 3d5d93a0abbb08cd281d56044cd642859bd0394f Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Tue, 23 Jul 2024 14:55:34 +0800 Subject: [PATCH 01/74] sync --- pdf_extract.py | 266 +++++++++++++++++++++++++++---------------------- 1 file changed, 146 insertions(+), 120 deletions(-) diff --git a/pdf_extract.py b/pdf_extract.py index 3dccf1f..4e6f450 100644 --- a/pdf_extract.py +++ b/pdf_extract.py @@ -66,6 +66,43 @@ def __getitem__(self, idx): image = self.transform(raw_image) return image +# def parser_per_image(image): + + +def rough_layout(layout_model, image): + layout_res = layout_model(image, ignore_catids=[]) + return layout_res + +from dataclasses import dataclass + +@dataclass +class FineGrainedConfig: + conf_thres: float = 0.3 + iou_thres: float = 0.5 + verbose: bool = False + +def fine_grained_layout(image, layout_res, config:FineGrainedConfig): + latex_filling_list = [] + mf_image_list = [] + mfd_res = mfd_model.predict(image, imgsz=img_size, conf=config.conf_thres, iou=config.iou_thres, verbose=config.verbose)[0] + for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), + mfd_res.boxes.conf.cpu(), + mfd_res.boxes.cls.cpu()): + xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] + new_item = { + 'category_id': 13 + int(cla.item()), + 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], + 'score': round(float(conf.item()), 2), + 'latex': '', + } + layout_res['layout_dets'].append(new_item) + latex_filling_list.append(new_item) + bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax]) + mf_image_list.append(bbox_img) + + return latex_filling_list, mf_image_list + +from tqdm.auto import tqdm if __name__ == '__main__': parser = argparse.ArgumentParser() @@ -113,137 +150,126 @@ def __getitem__(self, idx): if img_list is None: continue print("pdf index:", idx, "pages:", len(img_list)) - # layout检测 + 公式检测 + # ================ layout检测 + 公式检测 ==================================== doc_layout_result = [] latex_filling_list = [] mf_image_list = [] - for idx, image in enumerate(img_list): + for idx, image in tqdm(enumerate(img_list)): img_H, img_W = image.shape[0], image.shape[1] - layout_res = layout_model(image, ignore_catids=[]) - # 公式检测 - mfd_res = mfd_model.predict(image, imgsz=img_size, conf=conf_thres, iou=iou_thres, verbose=True)[0] - for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), mfd_res.boxes.cls.cpu()): - xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] - new_item = { - 'category_id': 13 + int(cla.item()), - 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], - 'score': round(float(conf.item()), 2), - 'latex': '', - } - layout_res['layout_dets'].append(new_item) - latex_filling_list.append(new_item) - bbox_img = get_croped_image(Image.fromarray(image), [xmin, ymin, xmax, ymax]) - mf_image_list.append(bbox_img) - + layout_res = rough_layout(layout_model, image) + latex_filling_list_new, mf_image_list_new = fine_grained_layout(image, layout_res) + latex_filling_list.extend(latex_filling_list_new) + mf_image_list.extend(mf_image_list_new) layout_res['page_info'] = dict( page_no = idx, height = img_H, width = img_W ) doc_layout_result.append(layout_res) - - # 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 - a = time.time() - dataset = MathDataset(mf_image_list, transform=mfr_transform) - dataloader = DataLoader(dataset, batch_size=128, num_workers=32) - mfr_res = [] - for imgs in dataloader: - imgs = imgs.to(device) - output = mfr_model.generate({'image': imgs}) - mfr_res.extend(output['pred_str']) - for res, latex in 
zip(latex_filling_list, mfr_res): - res['latex'] = latex_rm_whitespace(latex) - b = time.time() - print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) - - # ocr识别 - for idx, image in enumerate(img_list): - pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - single_page_res = doc_layout_result[idx]['layout_dets'] - single_page_mfdetrec_res = [] - for res in single_page_res: - if int(res['category_id']) in [13, 14]: - xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - single_page_mfdetrec_res.append({ - "bbox": [xmin, ymin, xmax, ymax], - }) - for res in single_page_res: - if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 - xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - crop_box = [xmin, ymin, xmax, ymax] - cropped_img = Image.new('RGB', pil_img.size, 'white') - cropped_img.paste(pil_img.crop(crop_box), crop_box) - cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) - ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] - if ocr_res: - for box_ocr_res in ocr_res: - p1, p2, p3, p4 = box_ocr_res[0] - text, score = box_ocr_res[1] - doc_layout_result[idx]['layout_dets'].append({ - 'category_id': 15, - 'poly': p1 + p2 + p3 + p4, - 'score': round(score, 2), - 'text': text, - }) - - output_dir = args.output - os.makedirs(output_dir, exist_ok=True) - basename = os.path.basename(single_pdf)[0:-4] - with open(os.path.join(output_dir, f'{basename}.json'), 'w') as f: - json.dump(doc_layout_result, f) + # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== + # # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== + # a = time.time() + # dataset = MathDataset(mf_image_list, transform=mfr_transform) + # dataloader = DataLoader(dataset, batch_size=128, num_workers=32) + # mfr_res = [] + # for imgs in dataloader: + # imgs = imgs.to(device) + # output = mfr_model.generate({'image': imgs}) + # mfr_res.extend(output['pred_str']) + # for res, latex in zip(latex_filling_list, mfr_res): + # res['latex'] = latex_rm_whitespace(latex) + # b = time.time() + # print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) - if args.vis: - color_palette = [ - (255,64,255),(255,255,0),(0,255,255),(255,215,135),(215,0,95),(100,0,48),(0,175,0),(95,0,95),(175,95,0),(95,95,0), - (95,95,255),(95,175,135),(215,95,0),(0,0,255),(0,255,0),(255,0,0),(0,95,215),(0,0,0),(0,0,0),(0,0,0) - ] - id2names = ["title", "plain_text", "abandon", "figure", "figure_caption", "table", "table_caption", "table_footnote", - "isolate_formula", "formula_caption", " ", " ", " ", "inline_formula", "isolated_formula", "ocr_text"] - vis_pdf_result = [] - for idx, image in enumerate(img_list): - single_page_res = doc_layout_result[idx]['layout_dets'] - vis_img = Image.new('RGB', Image.fromarray(image).size, 'white') if args.render else Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - draw = ImageDraw.Draw(vis_img) - for res in single_page_res: - label = int(res['category_id']) - if label > 15: # 筛选要可视化的类别 - continue - label_name = id2names[label] - x_min, y_min = int(res['poly'][0]), int(res['poly'][1]) - x_max, y_max = int(res['poly'][4]), int(res['poly'][5]) - if args.render and label in [13, 14, 15]: - try: - if label in [13, 14]: # 渲染公式 - window_img = tex2pil(res['latex'])[0] - else: - if True: # 渲染中文 - window_img = 
zhtext2pil(res['text']) - else: # 渲染英文 - window_img = tex2pil([res['text']], tex_type="text")[0] - ratio = min((x_max - x_min) / window_img.width, (y_max - y_min) / window_img.height) - 0.05 - window_img = window_img.resize((int(window_img.width * ratio), int(window_img.height * ratio))) - vis_img.paste(window_img, (int(x_min + (x_max-x_min-window_img.width) / 2), int(y_min + (y_max-y_min-window_img.height) / 2))) - except Exception as e: - print(f"got exception on {text}, error info: {e}") - draw.rectangle([x_min, y_min, x_max, y_max], fill=None, outline=color_palette[label], width=1) - fontText = ImageFont.truetype("assets/fonts/simhei.ttf", 15, encoding="utf-8") - draw.text((x_min, y_min), label_name, color_palette[label], font=fontText) + # # ================ ocr识别 ==================================== + # # + # for idx, image in enumerate(img_list): + # pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) + # single_page_res = doc_layout_result[idx]['layout_dets'] + # single_page_mfdetrec_res = [] + # for res in single_page_res: + # if int(res['category_id']) in [13, 14]: + # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + # single_page_mfdetrec_res.append({ + # "bbox": [xmin, ymin, xmax, ymax], + # }) + # for res in single_page_res: + # if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 + # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + # crop_box = [xmin, ymin, xmax, ymax] + # cropped_img = Image.new('RGB', pil_img.size, 'white') + # cropped_img.paste(pil_img.crop(crop_box), crop_box) + # cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) + # ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] + # if ocr_res: + # for box_ocr_res in ocr_res: + # p1, p2, p3, p4 = box_ocr_res[0] + # text, score = box_ocr_res[1] + # doc_layout_result[idx]['layout_dets'].append({ + # 'category_id': 15, + # 'poly': p1 + p2 + p3 + p4, + # 'score': round(score, 2), + # 'text': text, + # }) + + # output_dir = args.output + # os.makedirs(output_dir, exist_ok=True) + # basename = os.path.basename(single_pdf)[0:-4] + # with open(os.path.join(output_dir, f'{basename}.json'), 'w') as f: + # json.dump(doc_layout_result, f) + + # if args.vis: + # color_palette = [ + # (255,64,255),(255,255,0),(0,255,255),(255,215,135),(215,0,95),(100,0,48),(0,175,0),(95,0,95),(175,95,0),(95,95,0), + # (95,95,255),(95,175,135),(215,95,0),(0,0,255),(0,255,0),(255,0,0),(0,95,215),(0,0,0),(0,0,0),(0,0,0) + # ] + # id2names = ["title", "plain_text", "abandon", "figure", "figure_caption", "table", "table_caption", "table_footnote", + # "isolate_formula", "formula_caption", " ", " ", " ", "inline_formula", "isolated_formula", "ocr_text"] + # vis_pdf_result = [] + # for idx, image in enumerate(img_list): + # single_page_res = doc_layout_result[idx]['layout_dets'] + # vis_img = Image.new('RGB', Image.fromarray(image).size, 'white') if args.render else Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) + # draw = ImageDraw.Draw(vis_img) + # for res in single_page_res: + # label = int(res['category_id']) + # if label > 15: # 筛选要可视化的类别 + # continue + # label_name = id2names[label] + # x_min, y_min = int(res['poly'][0]), int(res['poly'][1]) + # x_max, y_max = int(res['poly'][4]), int(res['poly'][5]) + # if args.render and label in [13, 14, 15]: + # try: + # if label in [13, 14]: # 渲染公式 + # window_img = tex2pil(res['latex'])[0] + # else: + # if 
True: # 渲染中文 + # window_img = zhtext2pil(res['text']) + # else: # 渲染英文 + # window_img = tex2pil([res['text']], tex_type="text")[0] + # ratio = min((x_max - x_min) / window_img.width, (y_max - y_min) / window_img.height) - 0.05 + # window_img = window_img.resize((int(window_img.width * ratio), int(window_img.height * ratio))) + # vis_img.paste(window_img, (int(x_min + (x_max-x_min-window_img.width) / 2), int(y_min + (y_max-y_min-window_img.height) / 2))) + # except Exception as e: + # print(f"got exception on {text}, error info: {e}") + # draw.rectangle([x_min, y_min, x_max, y_max], fill=None, outline=color_palette[label], width=1) + # fontText = ImageFont.truetype("assets/fonts/simhei.ttf", 15, encoding="utf-8") + # draw.text((x_min, y_min), label_name, color_palette[label], font=fontText) - width, height = vis_img.size - width, height = int(0.75*width), int(0.75*height) - vis_img = vis_img.resize((width, height)) - vis_pdf_result.append(vis_img) + # width, height = vis_img.size + # width, height = int(0.75*width), int(0.75*height) + # vis_img = vis_img.resize((width, height)) + # vis_pdf_result.append(vis_img) - first_page = vis_pdf_result.pop(0) - first_page.save(os.path.join(output_dir, f'{basename}.pdf'), 'PDF', resolution=100, save_all=True, append_images=vis_pdf_result) - try: - shutil.rmtree('./temp') - except: - pass + # first_page = vis_pdf_result.pop(0) + # first_page.save(os.path.join(output_dir, f'{basename}.pdf'), 'PDF', resolution=100, save_all=True, append_images=vis_pdf_result) + # try: + # shutil.rmtree('./temp') + # except: + # pass - now = datetime.datetime.now(tz) - end = time.time() - print(now.strftime('%Y-%m-%d %H:%M:%S')) - print('Finished! time cost:', int(end-start), 's') \ No newline at end of file + # now = datetime.datetime.now(tz) + # end = time.time() + # print(now.strftime('%Y-%m-%d %H:%M:%S')) + # print('Finished! 
time cost:', int(end-start), 's') \ No newline at end of file From 8bbef33309675ef74ce81c05634da8a220196c3f Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Tue, 23 Jul 2024 18:42:25 +0800 Subject: [PATCH 02/74] add rough layout module --- batch_run_utils.py | 114 +++++++++++++++++++++++++++++++++ get_data_utils.py | 103 ++++++++++++++++++++++++++++++ models/README.md | 37 ----------- rough_layout.py | 153 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 370 insertions(+), 37 deletions(-) create mode 100644 batch_run_utils.py create mode 100644 get_data_utils.py delete mode 100644 models/README.md create mode 100644 rough_layout.py diff --git a/batch_run_utils.py b/batch_run_utils.py new file mode 100644 index 0000000..fb64192 --- /dev/null +++ b/batch_run_utils.py @@ -0,0 +1,114 @@ +from tqdm.auto import tqdm +from multiprocessing import Pool +import numpy as np +import argparse +import os +from dataclasses import dataclass +from typing import List +@dataclass +class BatchModeConfig: + task_name = 'temp' + root_path : str + + index_part : int = 0 + num_parts : int = 1 + datapath : str = None + savepath : str = None + logpath : str = "analysis" + batch_num : int = 0 + redo : bool = False + shuffle: bool = False + debug:bool=False + verbose: bool = False + ray_nodes: List[int] = None + debug: bool = False + + def from_dict(kargs): + return BatchModeConfig(**kargs) + def to_dict(self): + return self.__dict__ + + + +def process_files(func, file_list, args:BatchModeConfig): + + num_processes = args.batch_num + if num_processes == 0: + results = [] + for arxivpath in tqdm(file_list): + results.append(func((arxivpath, args))) + return results + else: + with Pool(processes=num_processes) as pool: + args_list = [(file, args) for file in file_list] + results = list(tqdm(pool.imap(func, args_list), total=len(file_list))) + return results + +import json +def obtain_processed_filelist(args:BatchModeConfig): + ROOT_PATH = args.root_path + index_part= args.index_part + num_parts = args.num_parts + + if ROOT_PATH.endswith('.json'): + with open(ROOT_PATH,'r') as f: + alread_processing_file_list = json.load(f) + elif os.path.isfile(ROOT_PATH): + if ROOT_PATH.endswith('.filelist'): + with open(ROOT_PATH,'r') as f: + alread_processing_file_list = [t.strip() for t in f.readlines()] + elif ROOT_PATH.endswith('.arxivids'): + with open(ROOT_PATH,'r') as f: + alread_processing_file_list = [os.path.join(args.datapath, t.strip()) for t in f.readlines()] + else: + alread_processing_file_list = [ROOT_PATH] + elif os.path.isdir(ROOT_PATH): + ### this means we will do the whole subfiles under this folder + alread_processing_file_list = os.listdir(ROOT_PATH) + alread_processing_file_list = [os.path.join(ROOT_PATH,t) for t in alread_processing_file_list] + else: + ### directly use the arxivid as input + alread_processing_file_list = [os.path.join(args.datapath, ROOT_PATH)] + + totally_paper_num = len(alread_processing_file_list) + if totally_paper_num > 1: + divided_nums = np.linspace(0, totally_paper_num , num_parts+1) + divided_nums = [int(s) for s in divided_nums] + start_index = divided_nums[index_part] + end_index = divided_nums[index_part + 1] + else: + start_index = 0 + end_index = 1 + args.start_index = start_index + args.end_index = end_index + if args.shuffle: + np.random.shuffle(alread_processing_file_list) + alread_processing_file_list = alread_processing_file_list[start_index:end_index] + + return alread_processing_file_list + + +def save_analysis(analysis, debug, args): + + logpath = 
os.path.join(args.logpath,args.task_name) + print(logpath) + os.makedirs(logpath, exist_ok=True) + if args.num_parts > 1: + for key, val in analysis.items(): + print(f"{key}=>{len(val)}") + fold = os.path.join(logpath,f"{key.lower()}.filelist.split") + os.makedirs(fold, exist_ok=True) + with open(os.path.join(fold,f"{args.start_index}-{args.end_index}"), 'w') as f: + for line in val: + f.write(line+'\n') + else: + #print(analysis) + for key, val in analysis.items(): + print(f"{key}=>{len(val)}") + if debug: + print(val) + else: + with open(os.path.join(logpath,f"{key.lower()}.filelist"), 'w') as f: + for line in val: + f.write(line+'\n') + \ No newline at end of file diff --git a/get_data_utils.py b/get_data_utils.py new file mode 100644 index 0000000..3f8c441 --- /dev/null +++ b/get_data_utils.py @@ -0,0 +1,103 @@ + +import json +import requests +import io +import os +import fitz +def read_json_from_path(path, client): + if "s3" in path: + buffer = client.get(path).replace(b'\x00', b'\n') + if path.endswith('.json'): + return json.loads(buffer) + elif path.endswith('.jsonl'): + whole_data = [] + for t in io.BytesIO(buffer).readlines(): + try: + data = json.loads(t) + except: + print(t) + raise + whole_data.append(data) + return whole_data + else: + return {'content':str(buffer)} + elif path.startswith('http'): + response = requests.get(path) + if response.status_code == 200: + content = response.json()["content"] + if path.endswith('.json'): + content = json.loads(content) + elif path.endswith('.md'): + content = {'content':content} + return content + else: + return None + else: + if path.endswith('.json'): + with open(path,'r') as f: + data = json.load(f) + return data + elif path.endswith('.jsonl'): + with open(path,'r') as f: + whole_data = [] + for t in f.readlines(): + data = json.loads(t) + whole_data.append(data) + return whole_data + else: + raise NotImplementedError("please use json or jsonl file") + +def write_json_to_path(data, path, client): + if "s3" in path: + byte_object = json.dumps(data).encode('utf-8') + with io.BytesIO(byte_object) as f: + client.put(path, f) + else: + assert not path.startswith('http'), "why you want to save the file to a online path?" 
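+        # local write: make sure the parent directory exists first, otherwise
+        # open() below raises FileNotFoundError for a not-yet-created folder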
+ thedir = os.path.dirname(path) + os.makedirs(thedir, exist_ok=True) + with open(path,'w') as f: + json.dump(data, f) + +def build_client(): + print(f"we will building ceph client...................") + from petrel_client.client import Client # 安装完成后才可导入 + client = Client(conf_path="~/petreloss.conf") # 实例化Petrel Client,然后就可以调用下面的APIs + print(f"done..................") + return client + +def check_path_exists(path,client): + if "s3" in path: + return client.contains(path) + elif path.startswith('http'): + assert 'get_data' in path, "please use get_data flag for data path" + response = requests.get(path.replace('get_data','checkfile')) + if response.status_code == 200: + status = response.json()["status"] + return status + else: + return False + else: + return os.path.exists(path) + +def check_lock_exists(path, client): + if "s3" in path: + raise NotImplementedError("s3 lock not implemented") + elif path.startswith('http'): + assert 'get_data' in path, "please use get_data flag for data path" + response = requests.get(path.replace('get_data','checklock')) + if response.status_code == 200: + status = response.json()["status"] + return status + else: + return False + else: + raise NotImplementedError("please donot use lock lock") + return os.path.exists(path) + +def read_pdf_from_path(path, client): + if "s3" in path: + buffer = client.get(path) + return fitz.open(stream = buffer, filetype="pdf") + else: + return fitz.open(path) \ No newline at end of file diff --git a/models/README.md b/models/README.md deleted file mode 100644 index f523343..0000000 --- a/models/README.md +++ /dev/null @@ -1,37 +0,0 @@ -#### Install Git LFS -Before you begin, make sure Git Large File Storage (Git LFS) is installed on your system. Install it using the following command: - -```bash -git lfs install -``` - -#### Download the Model from Hugging Face -To download the `PDF-Extract-Kit` model from Hugging Face, use the following command: - -```bash -git lfs clone https://huggingface.co/wanderkid/PDF-Extract-Kit -``` - -Ensure that Git LFS is enabled during the clone to properly download all large files. 
- - - -Put [model files]() here: - -``` -./ -├── Layout -│ ├── config.json -│ └── weights.pth -├── MFD -│ └── weights.pt -├── MFR -│ └── UniMERNet -│ ├── config.json -│ ├── preprocessor_config.json -│ ├── pytorch_model.bin -│ ├── README.md -│ ├── tokenizer_config.json -│ └── tokenizer.json -└── README.md -``` \ No newline at end of file diff --git a/rough_layout.py b/rough_layout.py new file mode 100644 index 0000000..4ce1850 --- /dev/null +++ b/rough_layout.py @@ -0,0 +1,153 @@ + + + +from get_data_utils import * + +from typing import List +from PIL import Image +import numpy as np +from torch.utils.data import IterableDataset, DataLoader +from transformers import NougatImageProcessor +UNIFIED_WIDTH = 1650 +UNIFIED_HEIGHT = 2150 +def process_pdf_page_to_image(page, dpi): + + + pix = page.get_pixmap(matrix=fitz.Matrix(dpi/72, dpi/72)) + image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) + + # if pix.width > UNIFIED_WIDTH or pix.height > UNIFIED_HEIGHT: + # pix = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False) + # image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) + + + # Resize the image while maintaining aspect ratio + img_aspect = image.width / image.height + target_aspect = UNIFIED_WIDTH / UNIFIED_HEIGHT + + if img_aspect > target_aspect: + # Image is wider than target aspect ratio + new_width = UNIFIED_WIDTH + new_height = int(UNIFIED_WIDTH / img_aspect) + else: + # Image is taller than target aspect ratio + new_width = int(UNIFIED_HEIGHT * img_aspect) + new_height = UNIFIED_HEIGHT + + # Resize the image + image = image.resize((new_width, new_height), Image.ANTIALIAS) + + pad_width = (UNIFIED_WIDTH - image.width) // 2 + pad_height = (UNIFIED_HEIGHT - image.height) // 2 + + # Create a new image with the unified size and paste the resized image onto it with padding + new_image = Image.new('RGB', (UNIFIED_WIDTH, UNIFIED_HEIGHT), (255, 255, 255)) + new_image.paste(image, (pad_width, pad_height)) + + return new_image + + +class PDFImageDataset(IterableDataset): + client = None + def __init__(self, metadata_filepath): + super().__init__() + self.metadata= self.smart_read_json(metadata_filepath) + self.pathlist= [t['path'] for t in self.metadata] + self.last_read_pdf_buffer = {} + self.dpi = 200 + + def smart_read_json(self, json_path): + if "s3" in json_path and self.client is None: self.client = build_client() + if json_path.startswith("s3"): json_path = "opendata:"+ json_path + return read_json_from_path(json_path, self.client) + + def smart_write_json(self, data, targetpath): + if "s3" in targetpath and self.client is None: self.client = build_client() + if json_path.startswith("s3"): json_path = "opendata:"+ json_path + write_json_to_path(data, targetpath, self.client) + + def smart_load_pdf(self, pdf_path): + if "s3" in pdf_path and self.client is None: self.client = build_client() + if pdf_path.startswith("s3"): pdf_path = "opendata:"+ pdf_path + return read_pdf_from_path(pdf_path, self.client) + + def clean_pdf_buffer(self): + keys = list(self.last_read_pdf_buffer.keys()) + for key in keys: + self.last_read_pdf_buffer[key].close() + del self.last_read_pdf_buffer[key] + + def get_pdf_buffer(self,path): + if "s3" in path and self.client is None: self.client = build_client() + if path.startswith("s3"): path = "opendata:"+ path + if path not in self.last_read_pdf_buffer: + self.clean_pdf_buffer() + self.last_read_pdf_buffer[path] = self.smart_load_pdf(path) + pdf_buffer = self.last_read_pdf_buffer[path] + return pdf_buffer + + def 
get_pdf_by_index(self,index): + pdf_path = self.pathlist[index] + return self.get_pdf_buffer(pdf_path) + + + + + def __len__(self): + return len(self.pathlist) + + def __iter__(self): + self.current_pdf_index = 0 + self.current_page_index = 0 + self.last_read_pdf_buffer = {} + return self + + @property + def current_doc(self): + if len(self.last_read_pdf_buffer)==0:return None + return list(self.last_read_pdf_buffer.values())[0] + + def __next__(self): + if self.current_pdf_index >= len(self.pathlist): + raise StopIteration + + if self.current_page_index >= self.get_pdf_by_index(self.current_pdf_index).page_count: + self.current_pdf_index += 1 + self.current_page_index = 0 + + if self.current_pdf_index >= len(self.pathlist): + raise StopIteration + + page = self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) + image = process_pdf_page_to_image(page, self.dpi) + + image = np.array(image)[:,:,::-1].copy() + + return image + +dataset = PDFImageDataset("part-66210c190659-000035.jsonl") +dataloader = DataLoader(dataset, batch_size=4) + +import yaml +from modules.layoutlmv3.model_init import Layoutlmv3_Predictor +def rough_layout(layout_model, image): + layout_res = layout_model(image, ignore_catids=[]) + return layout_res +def layout_model_init(weight): + model = Layoutlmv3_Predictor(weight) + return model + +with open('configs/model_configs.yaml') as f: + model_configs = yaml.load(f, Loader=yaml.FullLoader) +layout_model = layout_model_init(model_configs['model_args']['layout_weight']) +img_size = model_configs['model_args']['img_size'] +conf_thres= model_configs['model_args']['conf_thres'] +iou_thres = model_configs['model_args']['iou_thres'] +device = model_configs['model_args']['device'] +dpi = model_configs['model_args']['pdf_dpi'] + +for i, images in enumerate(dataloader): + print(f"Batch {i + 1}") + print(images.shape) # Should print torch.Size([4, 3, 224, 224]) + layout_res = layout_model(images, ignore_catids=[]) + print(layout_res.shape) # Should print torch.Size([4, 3, 224, 224]) From ed136d41103df8d4d279ee72144032b4b2395189 Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Wed, 24 Jul 2024 08:47:20 +0000 Subject: [PATCH 03/74] add batch processing --- dataaccelerate.py | 89 +++++++++++ get_batch_layout_model.py | 239 +++++++++++++++++++++++++++++ get_batch_yolo.py | 77 ++++++++++ modules/layoutlmv3/model_init.py | 3 +- pdf_extract.py | 3 + rough_layout.py | 251 ++++++++++++++++++++++--------- 6 files changed, 589 insertions(+), 73 deletions(-) create mode 100644 dataaccelerate.py create mode 100644 get_batch_layout_model.py create mode 100644 get_batch_yolo.py diff --git a/dataaccelerate.py b/dataaccelerate.py new file mode 100644 index 0000000..c7f74dc --- /dev/null +++ b/dataaccelerate.py @@ -0,0 +1,89 @@ +#pip install prefetch_generator + +# 新建DataLoaderX类 +from torch.utils.data import DataLoader +import numpy as np +import torch + +def sendall2gpu(listinlist,device): + if isinstance(listinlist,(list,tuple)): + return [sendall2gpu(_list,device) for _list in listinlist] + elif isinstance(listinlist, (dict)): + return dict([(key,sendall2gpu(val,device)) for key,val in listinlist.items()]) + elif isinstance(listinlist, np.ndarray): + return torch.from_numpy(listinlist).to(device=device, non_blocking=True) + else: + return listinlist.to(device=device, non_blocking=True) +try: + from prefetch_generator import BackgroundGenerator + class DataLoaderX(DataLoader): + def __iter__(self): + return BackgroundGenerator(super().__iter__()) +except: + 
pass#DataLoaderX = DataLoader +class DataSimfetcher(): + def __init__(self, loader, device='auto'): + + if device == 'auto': + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + else: + self.device = device + self.loader = iter(loader) + + def next(self): + try: + + self.batch = next(self.loader) + self.batch = sendall2gpu(self.batch,self.device) + except StopIteration: + self.batch = None + return self.batch +class DataPrefetcher(): + def __init__(self, loader, device='auto'): + if device == 'auto': + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + else: + self.device = device#raise NotImplementedError + self.loader = iter(loader) + self.stream = torch.cuda.Stream() + # With Amp, it isn't necessary to manually convert data to half. + # if args.fp16: + # self.mean = self.mean.half() + # self.std = self.std.half() + self.preload() + + def preload(self): + try: + self.batch = next(self.loader) + except StopIteration: + self.batch = None + return + with torch.cuda.stream(self.stream): + self.batch = sendall2gpu(self.batch,self.device) + # With Amp, it isn't necessary to manually convert data to half. + # if args.fp16: + # self.next_input = self.next_input.half() + # else: + # self.next_input = self.next_input.float() + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + batch = self.batch + self.preload() + return batch + +class infinite_batcher: + def __init__(self,data_loader, device='auto'): + self.length=len(data_loader) + self.now=-1 + self.data_loader= data_loader + self.prefetcher = None + self.device = device + def next(self): + if (self.now >= self.length) or (self.now == -1): + if self.prefetcher is not None:del self.prefetcher + self.prefetcher = DataSimfetcher(self.data_loader,device=self.device) + self.now=0 + self.now+=1 + return self.prefetcher.next() + diff --git a/get_batch_layout_model.py b/get_batch_layout_model.py new file mode 100644 index 0000000..8f83484 --- /dev/null +++ b/get_batch_layout_model.py @@ -0,0 +1,239 @@ +from modules.layoutlmv3.model_init import * + +import time, math +class _DummyTimer: + """A dummy timer that does nothing.""" + + def __enter__(self): + return self + + def __exit__(self, *args): + pass + +class _Timer: + """Timer.""" + + def __init__(self, name): + self.name = name + self.count = 0 + self.mean = 0.0 + self.sum_squares = 0.0 + self.start_time = None + + def __enter__(self): + self.start_time = time.time() + + def __exit__(self, exc_type, exc_value, exc_traceback): + elapsed_time = time.time() - self.start_time + self.update(elapsed_time) + self.start_time = None + + def update(self, elapsed_time): + self.count += 1 + delta = elapsed_time - self.mean + self.mean += delta / self.count + delta2 = elapsed_time - self.mean + self.sum_squares += delta * delta2 + + def mean_elapsed(self): + return self.mean + + def std_elapsed(self): + if self.count > 1: + variance = self.sum_squares / (self.count - 1) + return math.sqrt(variance) + else: + return 0.0 + +class Timers: + """Group of timers.""" + + def __init__(self, activate=False): + self.timers = {} + self.activate = activate + def __call__(self, name): + if not self.activate:return _DummyTimer() + if name not in self.timers: + self.timers[name] = _Timer(name) + return self.timers[name] + + def log(self, names=None, normalizer=1.0): + """Log a group of timers.""" + assert normalizer > 0.0 + if names is None: + names = self.timers.keys() + print("Timer Results:") + for name in names: + mean_elapsed = 
self.timers[name].mean_elapsed() * 1000.0 / normalizer + std_elapsed = self.timers[name].std_elapsed() * 1000.0 / normalizer + space_num = " "*name.count('/') + print(f"{space_num}{name}: {mean_elapsed:.2f}±{std_elapsed:.2f} ms") + +def inference( + self, + batched_inputs: List[Dict[str, torch.Tensor]], + detected_instances: Optional[List[Instances]] = None, + do_postprocess: bool = True, + timers = None + ): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + detected_instances (None or list[Instances]): if not None, it + contains an `Instances` object per image. The `Instances` + object contains "pred_boxes" and "pred_classes" which are + known boxes in the image. + The inference will then skip the detection of bounding boxes, + and only predict other per-ROI outputs. + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + When do_postprocess=True, same as in :meth:`forward`. + Otherwise, a list[Instances] containing raw network outputs. + """ + assert not self.training + + with timers('inference/preprocess_image'): + images = self.preprocess_image(batched_inputs) + # features = self.backbone(images.tensor) + with timers('inference/get_batch'): + input = self.get_batch(batched_inputs, images) + with timers('inference/get_features'): + features = self.backbone(input) + with timers('inference/merge_proposals'): + if detected_instances is None: + if self.proposal_generator is not None: + with timers('merge_proposals/compute_proposals'): + proposals, _ = self.proposal_generator(images, features, None) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + with timers('inference/merge_proposals/roi_heads'): + results, _ = self.roi_heads(images, features, proposals, None) + else: + detected_instances = [x.to(self.device) for x in detected_instances] + results = self.roi_heads.forward_with_given_boxes(features, detected_instances) + with timers('inference/postprocess'): + if do_postprocess: + assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." 
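+            # GeneralizedRCNN._postprocess rescales each Instances result from
+            # the resized model input back to the page's original (height, width)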
+ results = GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) + return results + +class Layoutlmv3_BatchPredictor(Layoutlmv3_Predictor): + + timers = Timers(False) + + def batch_predict(self, image_and_height_and_width,timers): + with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 + images,heights, widths = image_and_height_and_width + inputs =[ {"image": image, "height": height, "width": width} for image, height, width in zip(images,heights, widths)] + #inputs = {"image": images, "height": heights, "width": widths} + predictions = inference(self.predictor.model,inputs,timers=timers) + return predictions + + def __call__(self, image, ignore_catids=[]): + with self.timers('inference'): + outputslist = self.batch_predict(image, self.timers) + use_old_bbox_collection = False + if use_old_bbox_collection: + with self.timers('wholepost'): + page_layout_result_list = [] + for outputs in outputslist: + page_layout_result = { + "layout_dets": [] + } + + # Convert tensor data to numpy arrays + with self.timers('wholepost/to_numpy'): + boxes = outputs["instances"].to("cpu")._fields["pred_boxes"].tensor.numpy() + labels = outputs["instances"].to("cpu")._fields["pred_classes"].numpy() + scores = outputs["instances"].to("cpu")._fields["scores"].numpy() + + with self.timers('wholepost/compute_mask'): + # Create a mask for filtering out the ignored categories + mask = np.isin(labels, ignore_catids, invert=True) + + with self.timers('wholepost/slicing'): + # Apply the mask to filter out the ignored categories + filtered_boxes = boxes[mask] + filtered_labels = labels[mask] + filtered_scores = scores[mask] + + with self.timers('wholepost/stack'): + # Collect the layout details + polys = np.column_stack([ + filtered_boxes[:, 0], filtered_boxes[:, 1], + filtered_boxes[:, 2], filtered_boxes[:, 1], + filtered_boxes[:, 2], filtered_boxes[:, 3], + filtered_boxes[:, 0], filtered_boxes[:, 3] + ]) + + with self.timers('wholepost/restack_layout'): + # Populate the layout_dets + for i in range(len(filtered_labels)): + page_layout_result["layout_dets"].append({ + "category_id": filtered_labels[i], + "poly": polys[i].tolist(), + "score": filtered_scores[i] + }) + + page_layout_result_list.append(page_layout_result) + else: + with self.timers('wholepost'): + page_layout_result_list = [] + for outputs in outputslist: + page_layout_result = { + "layout_dets": [] + } + instances = outputs["instances"] + # Convert tensor data to numpy arrays + with self.timers('wholepost/to_numpy1'): + boxes = instances._fields["pred_boxes"].tensor + labels = instances._fields["pred_classes"] + scores = instances._fields["scores"] + + with self.timers('wholepost/compute_mask'): + # Create a mask for filtering out the ignored categories + ignore_catids_tensor = torch.tensor(ignore_catids, device=labels.device) + mask = ~torch.isin(labels, ignore_catids_tensor) + + with self.timers('wholepost/slicing'): + # Apply the mask to filter out the ignored categories + filtered_boxes = boxes[mask] + filtered_labels = labels[mask] + filtered_scores = scores[mask] + + with self.timers('wholepost/to_numpy2'): + filtered_boxes = filtered_boxes.cpu().numpy() + filtered_labels = filtered_labels.cpu().numpy() + filtered_scores = filtered_scores.cpu().numpy() + + with self.timers('wholepost/stack'): + # Collect the layout details + polys = np.column_stack([ + filtered_boxes[:, 0], filtered_boxes[:, 1], + filtered_boxes[:, 2], filtered_boxes[:, 1], + filtered_boxes[:, 2], filtered_boxes[:, 3], + filtered_boxes[:, 0], 
filtered_boxes[:, 3] + ]) + + with self.timers('wholepost/restack_layout'): + # Populate the layout_dets + for i in range(len(filtered_labels)): + page_layout_result["layout_dets"].append({ + "category_id": filtered_labels[i], + "poly": polys[i].tolist(), + "score": filtered_scores[i] + }) + + page_layout_result_list.append(page_layout_result) + #self.timers.log() + return page_layout_result_list + + + +def get_layout_model(model_configs): + model = Layoutlmv3_BatchPredictor(model_configs['model_args']['layout_weight']) + return model \ No newline at end of file diff --git a/get_batch_yolo.py b/get_batch_yolo.py new file mode 100644 index 0000000..168b699 --- /dev/null +++ b/get_batch_yolo.py @@ -0,0 +1,77 @@ +from ultralytics.engine.results import Results +from ultralytics.utils import ops +from ultralytics.utils import ARGV +from ultralytics.data.augment import LetterBox +from ultralytics.utils.checks import check_imgsz +def build_mfd_predictor( + self, + stream: bool = False, + predictor=None, + **kwargs, + ): + + + is_cli = (ARGV[0].endswith("yolo") or ARGV[0].endswith("ultralytics")) and any( + x in ARGV for x in ("predict", "track", "mode=predict", "mode=track") + ) + + custom = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict"} # method defaults + args = {**self.overrides, **custom, **kwargs} # highest priority args on the right + prompts = args.pop("prompts", None) # for SAM-type models + + if not self.predictor: + self.predictor = predictor or self._smart_load("predictor")(overrides=args, _callbacks=self.callbacks) + self.predictor.setup_model(model=self.model, verbose=is_cli) +class mfd_process: + def __init__(self, imgsz, stride, pt): + self.imgsz = imgsz + + self.stride = stride + self.pt = pt + def __call__(self, im): + """ + Pre-transform input image before inference. + + Args: + im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list. + + Returns: + (list): A list of transformed images. 
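+
+        Note: LetterBox only uses rectangular (auto) padding when every image in
+        the batch shares one shape and the model is a native PyTorch model;
+        otherwise each image is padded to the exact target size.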
+ """ + imgsz = check_imgsz(self.imgsz, stride=self.stride, min_dim=2) + same_shapes = len({x.shape for x in im}) == 1 + letterbox = LetterBox(imgsz, auto=same_shapes and self.pt, stride=self.stride) + return [letterbox(image=x) for x in im] + +def fastpostprocess(self, preds, img, orig_imgs): + """Post-processes predictions and returns a list of Results objects.""" + + preds = ops.non_max_suppression( + preds, + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes, + ) + # if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list + # orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) ## <-- this step only convert the channel order back and to cpu and to uni8, no need this + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + img_path = self.batch[0][i] + results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results +# import ultralytics +# ultralytics.models.yolo.detect.DetectionPredictor.postprocess = fastpostprocess +from ultralytics import YOLO +def get_batch_YOLO_model(model_configs)->YOLO: + mfd_model = YOLO(model_configs['model_args']['mfd_weight']) + img_size = model_configs['model_args']['img_size'] + conf_thres= model_configs['model_args']['conf_thres'] + iou_thres = model_configs['model_args']['iou_thres'] + build_mfd_predictor(mfd_model , imgsz=img_size, conf=conf_thres, iou=iou_thres, verbose=False) + return mfd_model + + diff --git a/modules/layoutlmv3/model_init.py b/modules/layoutlmv3/model_init.py index 671ed16..6631934 100644 --- a/modules/layoutlmv3/model_init.py +++ b/modules/layoutlmv3/model_init.py @@ -138,4 +138,5 @@ def __call__(self, image, ignore_catids=[]): ], "score": scores[bbox_idx] }) - return page_layout_result \ No newline at end of file + return page_layout_result + diff --git a/pdf_extract.py b/pdf_extract.py index 4e6f450..d2b2063 100644 --- a/pdf_extract.py +++ b/pdf_extract.py @@ -122,6 +122,7 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): with open('configs/model_configs.yaml') as f: model_configs = yaml.load(f, Loader=yaml.FullLoader) img_size = model_configs['model_args']['img_size'] + conf_thres = model_configs['model_args']['conf_thres'] iou_thres = model_configs['model_args']['iou_thres'] device = model_configs['model_args']['device'] @@ -157,6 +158,8 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): for idx, image in tqdm(enumerate(img_list)): img_H, img_W = image.shape[0], image.shape[1] layout_res = rough_layout(layout_model, image) + print(image.shape) + raise latex_filling_list_new, mf_image_list_new = fine_grained_layout(image, layout_res) latex_filling_list.extend(latex_filling_list_new) mf_image_list.extend(mf_image_list_new) diff --git a/rough_layout.py b/rough_layout.py index 4ce1850..d27b8a9 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -1,61 +1,79 @@ +from get_batch_yolo import mfd_process, get_batch_YOLO_model +from get_batch_layout_model import get_layout_model from get_data_utils import * - from typing import List from PIL import Image import numpy as np from torch.utils.data import IterableDataset, DataLoader -from transformers import NougatImageProcessor +import torch +import numpy as np +from tqdm.auto import tqdm +import yaml +from dataaccelerate import DataPrefetcher +from torch.utils.data import IterableDataset, get_worker_info + 
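+# Rendered pages are padded with white on the bottom/right (see
+# pad_image_to_ratio below) so every page matches this target aspect ratio.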
UNIFIED_WIDTH = 1650 UNIFIED_HEIGHT = 2150 -def process_pdf_page_to_image(page, dpi): - - - pix = page.get_pixmap(matrix=fitz.Matrix(dpi/72, dpi/72)) - image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) - - # if pix.width > UNIFIED_WIDTH or pix.height > UNIFIED_HEIGHT: - # pix = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False) - # image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) +def pad_image_to_ratio(image, target_ratio): + """ + Pads the given PIL.Image object to fit the specified width-height ratio + by adding padding only to the bottom and right sides. + :param image: PIL.Image object + :param target_ratio: Desired width/height ratio (e.g., 16/9) + :return: New PIL.Image object with the padding applied + """ + # Original dimensions + orig_width, orig_height = image.size + orig_ratio = orig_width / orig_height - # Resize the image while maintaining aspect ratio - img_aspect = image.width / image.height - target_aspect = UNIFIED_WIDTH / UNIFIED_HEIGHT - - if img_aspect > target_aspect: - # Image is wider than target aspect ratio - new_width = UNIFIED_WIDTH - new_height = int(UNIFIED_WIDTH / img_aspect) + # Calculate target dimensions + if orig_ratio < target_ratio: + # Taller than target ratio, pad width + target_height = orig_height + target_width = int(target_height * target_ratio) else: - # Image is taller than target aspect ratio - new_width = int(UNIFIED_HEIGHT * img_aspect) - new_height = UNIFIED_HEIGHT + # Wider than target ratio, pad height + target_width = orig_width + target_height = int(target_width / target_ratio) - # Resize the image - image = image.resize((new_width, new_height), Image.ANTIALIAS) + # Calculate padding needed + pad_right = target_width - orig_width + pad_bottom = target_height - orig_height + + # Create new image with target dimensions and a white background + new_image = Image.new("RGB", (target_width, target_height), (255, 255, 255)) + new_image.paste(image, (0, 0)) - pad_width = (UNIFIED_WIDTH - image.width) // 2 - pad_height = (UNIFIED_HEIGHT - image.height) // 2 - - # Create a new image with the unified size and paste the resized image onto it with padding - new_image = Image.new('RGB', (UNIFIED_WIDTH, UNIFIED_HEIGHT), (255, 255, 255)) - new_image.paste(image, (pad_width, pad_height)) - return new_image +def process_pdf_page_to_image(page, dpi): + pix = page.get_pixmap(matrix=fitz.Matrix(dpi/72, dpi/72)) + if pix.width > 3000 or pix.height > 3000: + pix = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False) + image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) + else: + image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) + + image = pad_image_to_ratio(image, UNIFIED_WIDTH / UNIFIED_HEIGHT) + + image = np.array(image)[:,:,::-1] + return image.copy() class PDFImageDataset(IterableDataset): client = None - def __init__(self, metadata_filepath): + def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform): super().__init__() self.metadata= self.smart_read_json(metadata_filepath) self.pathlist= [t['path'] for t in self.metadata] self.last_read_pdf_buffer = {} self.dpi = 200 - + self.aug = aug + self.input_format = input_format + self.mfd_pre_transform = mfd_pre_transform def smart_read_json(self, json_path): if "s3" in json_path and self.client is None: self.client = build_client() if json_path.startswith("s3"): json_path = "opendata:"+ json_path @@ -91,15 +109,19 @@ def get_pdf_by_index(self,index): return self.get_pdf_buffer(pdf_path) - - - def __len__(self): 
- return len(self.pathlist) - def __iter__(self): - self.current_pdf_index = 0 - self.current_page_index = 0 - self.last_read_pdf_buffer = {} + worker_info = get_worker_info() + if worker_info is None: # single-process data loading, return the full iterator + self.current_pdf_index = 0 + self.current_page_index = 0 + else: # in a worker process + # split workload + num_workers = worker_info.num_workers + worker_id = worker_info.id + self.current_pdf_index = worker_id + self.current_page_index = 0 + + self.pdf = self.get_pdf_by_index(self.current_pdf_index) return self @property @@ -112,42 +134,127 @@ def __next__(self): raise StopIteration if self.current_page_index >= self.get_pdf_by_index(self.current_pdf_index).page_count: - self.current_pdf_index += 1 + worker_info = get_worker_info() + step_for_pdf= 1 if worker_info is None else worker_info.num_workers + self.current_pdf_index += step_for_pdf self.current_page_index = 0 if self.current_pdf_index >= len(self.pathlist): raise StopIteration page = self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) - image = process_pdf_page_to_image(page, self.dpi) - - image = np.array(image)[:,:,::-1].copy() - - return image + self.current_page_index += 1 + oimage = process_pdf_page_to_image(page, self.dpi) + original_image = oimage[:, :, ::-1] if self.input_format == "RGB" else oimage + height, width = original_image.shape[:2] + image = self.aug.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] + mfd_image=self.prepare_for_mfd_model(oimage) + #print(self.current_pdf_index, self.current_page_index) + return self.current_pdf_index, mfd_image, image, height, width -dataset = PDFImageDataset("part-66210c190659-000035.jsonl") -dataloader = DataLoader(dataset, batch_size=4) + def prepare_for_mfd_model(self, im:np.ndarray): + if self.mfd_pre_transform is None :return im + assert im.ndim==3 + im = [im] + im = np.stack(self.mfd_pre_transform(im)) + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w) + im = np.ascontiguousarray(im) # contiguous + im = im.astype('float')/255 + im = torch.from_numpy(im) + return im[0] -import yaml -from modules.layoutlmv3.model_init import Layoutlmv3_Predictor -def rough_layout(layout_model, image): - layout_res = layout_model(image, ignore_catids=[]) - return layout_res -def layout_model_init(weight): - model = Layoutlmv3_Predictor(weight) - return model - -with open('configs/model_configs.yaml') as f: - model_configs = yaml.load(f, Loader=yaml.FullLoader) -layout_model = layout_model_init(model_configs['model_args']['layout_weight']) -img_size = model_configs['model_args']['img_size'] -conf_thres= model_configs['model_args']['conf_thres'] -iou_thres = model_configs['model_args']['iou_thres'] -device = model_configs['model_args']['device'] -dpi = model_configs['model_args']['pdf_dpi'] - -for i, images in enumerate(dataloader): - print(f"Batch {i + 1}") - print(images.shape) # Should print torch.Size([4, 3, 224, 224]) - layout_res = layout_model(images, ignore_catids=[]) - print(layout_res.shape) # Should print torch.Size([4, 3, 224, 224]) +def custom_collate_fn(batch): + oimages = [] + images = [] + heights = [] + widths = [] + + for oimage, image, height, width in batch: + oimages.append(oimage) + images.append(torch.tensor(image)) + heights.append(torch.tensor(height)) + widths.append(torch.tensor(width)) + + images = torch.stack(images) + heights = torch.stack(heights) + 
widths = torch.stack(widths) + + return oimages, images, heights, widths + +def clean_layout_dets(layout_dets): + rows = [] + for t in layout_dets: + rows.append({ + "category_id":int(t['category_id']), + "poly":[int(t) for t in t['poly']], + "score":float(t['score']) + }) + + return rows + +def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32): + dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, + mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt)) + + dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=None, num_workers=8) + featcher = DataPrefetcher(dataloader,device='cuda') + data_to_save = [] + inner_batch_size = inner_batch_size + pbar = tqdm(total=len(dataset.pathlist),position=1,leave=True,desc="PDF Pages") + pdf_passed = set() + batch = featcher.next() + while batch is not None: + pdf_index,oimages_batch,images_batch,heights_batch, widths_batch = batch + #for pdf_index,oimages_batch,images_batch,heights_batch, widths_batch in dataloader: + pdf_index = set([t.item() for t in pdf_index]) + new_pdf_processed = pdf_index - pdf_passed + pdf_passed = pdf_passed|pdf_index + + for j in tqdm(range(0, len(oimages_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): + oimages = oimages_batch[j:j+inner_batch_size] + images = images_batch[j:j+inner_batch_size] + heights = heights_batch[j:j+inner_batch_size] + widths = widths_batch[j:j+inner_batch_size] + + layout_res = layout_model((images,heights, widths), ignore_catids=[]) + mfd_res = mfd_model.predict(oimages, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) + + for layout_det, mfd_det, layout_height, layout_width in zip(layout_res, mfd_res, heights, widths): + mfd_height,mfd_width = mfd_det.orig_shape + this_row = { + "layout": {'height':int(layout_height), 'width':int(layout_width), 'layout_dets':clean_layout_dets(layout_det['layout_dets'])}, + "mfd":{'height':int(mfd_height), 'width':int(mfd_width), + "bbox_cls" :mfd_det.boxes.cls.cpu().numpy().astype('uint8').tolist(), + "bbox_conf":mfd_det.boxes.conf.cpu().numpy().astype('float16').tolist(), + "bbox_xyxy":mfd_det.boxes.xyxy.cpu().numpy().astype('uint8').tolist(), + } + } + data_to_save.append(this_row) + pbar.update(len(new_pdf_processed)) + batch = featcher.next() + + with open(result_path,'w') as f: + for row in data_to_save: + f.write(json.dumps(row)+'\n') + +if __name__ == "__main__": + + with open('configs/model_configs.yaml') as f: + model_configs = yaml.load(f, Loader=yaml.FullLoader) + + img_size = model_configs['model_args']['img_size'] + conf_thres= model_configs['model_args']['conf_thres'] + iou_thres = model_configs['model_args']['iou_thres'] + device = model_configs['model_args']['device'] + dpi = model_configs['model_args']['pdf_dpi'] + + + layout_model = get_layout_model(model_configs) + mfd_model = get_batch_YOLO_model(model_configs) + deal_with_one_dataset("part-66210c190659-000035.jsonl", "part-66210c190659-000035.stage_1.jsonl", layout_model, mfd_model, inner_batch_size=4, batch_size=32) + # dataset = PDFImageDataset("part-66210c190659-000035.jsonl",layout_model.predictor.aug,layout_model.predictor.input_format,mfd_pre_transform=None) + # dataloader = DataLoader(dataset, batch_size=8,collate_fn=custom_collate_fn) + + + \ No newline at end of file From c334a66ad3d84a5d780ed8cd14642173d38f707c Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Wed, 24 Jul 2024 
18:37:57 +0800 Subject: [PATCH 04/74] test in S1 --- get_batch_layout_model.py | 2 ++ get_data_utils.py | 17 +++++++++++++++-- rough_layout.py | 37 +++++++++++++++++++++++++------------ 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/get_batch_layout_model.py b/get_batch_layout_model.py index 8f83484..440241e 100644 --- a/get_batch_layout_model.py +++ b/get_batch_layout_model.py @@ -1,3 +1,5 @@ +from detectron2.utils.logger import setup_logger +setup_logger() from modules.layoutlmv3.model_init import * import time, math diff --git a/get_data_utils.py b/get_data_utils.py index 3f8c441..4eb1e28 100644 --- a/get_data_utils.py +++ b/get_data_utils.py @@ -4,6 +4,7 @@ import io import os import fitz +fitz.TOOLS.mupdf_display_errors(on=False) def read_json_from_path(path, client): if "s3" in path: buffer = client.get(path).replace(b'\x00', b'\n') @@ -47,6 +48,18 @@ def read_json_from_path(path, client): else: raise NotImplementedError("please use json or jsonl file") +def write_jsonj_to_path(data, path, client): + if "s3" in path: + byte_object = json.dumps(data).encode('utf-8') + with io.BytesIO(byte_object) as f: + client.put(path, f) + else: + assert not path.startswith('http'), "why you want to save the file to a online path?" + thedir = os.path.dirname(path) + os.makedirs(thedir, exist_ok=True) + with open(path,'w') as f: + json.dump(data, f) + def write_json_to_path(data, path, client): if "s3" in path: byte_object = json.dumps(data).encode('utf-8') @@ -60,10 +73,10 @@ def write_json_to_path(data, path, client): json.dump(data, f) def build_client(): - print(f"we will building ceph client...................") + #print(f"we will building ceph client...................") from petrel_client.client import Client # 安装完成后才可导入 client = Client(conf_path="~/petreloss.conf") # 实例化Petrel Client,然后就可以调用下面的APIs - print(f"done..................") + #print(f"done..................") return client def check_path_exists(path,client): diff --git a/rough_layout.py b/rough_layout.py index d27b8a9..5c2d39f 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -65,10 +65,11 @@ def process_pdf_page_to_image(page, dpi): class PDFImageDataset(IterableDataset): client = None + #client = build_client() def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform): super().__init__() self.metadata= self.smart_read_json(metadata_filepath) - self.pathlist= [t['path'] for t in self.metadata] + #self.pathlist= [t['path'] for t in self.metadata] self.last_read_pdf_buffer = {} self.dpi = 200 self.aug = aug @@ -105,7 +106,7 @@ def get_pdf_buffer(self,path): return pdf_buffer def get_pdf_by_index(self,index): - pdf_path = self.pathlist[index] + pdf_path = self.metadata[index]['path'] return self.get_pdf_buffer(pdf_path) @@ -130,7 +131,7 @@ def current_doc(self): return list(self.last_read_pdf_buffer.values())[0] def __next__(self): - if self.current_pdf_index >= len(self.pathlist): + if self.current_pdf_index >= len(self.metadata): raise StopIteration if self.current_page_index >= self.get_pdf_by_index(self.current_pdf_index).page_count: @@ -139,10 +140,12 @@ def __next__(self): self.current_pdf_index += step_for_pdf self.current_page_index = 0 - if self.current_pdf_index >= len(self.pathlist): + if self.current_pdf_index >= len(self.metadata): raise StopIteration page = self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) + current_page_index = self.current_page_index + current_pdf_index = self.current_pdf_index self.current_page_index += 1 oimage = 
process_pdf_page_to_image(page, self.dpi) original_image = oimage[:, :, ::-1] if self.input_format == "RGB" else oimage @@ -151,7 +154,7 @@ def __next__(self): image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] mfd_image=self.prepare_for_mfd_model(oimage) #print(self.current_pdf_index, self.current_page_index) - return self.current_pdf_index, mfd_image, image, height, width + return current_pdf_index, current_page_index, mfd_image, image, height, width def prepare_for_mfd_model(self, im:np.ndarray): if self.mfd_pre_transform is None :return im @@ -193,25 +196,27 @@ def clean_layout_dets(layout_dets): return rows -def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32): +def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32,num_workers=8): dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt)) - dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=None, num_workers=8) + dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=None, num_workers=num_workers) featcher = DataPrefetcher(dataloader,device='cuda') data_to_save = [] inner_batch_size = inner_batch_size - pbar = tqdm(total=len(dataset.pathlist),position=1,leave=True,desc="PDF Pages") + pbar = tqdm(total=len(dataset.metadata),position=1,leave=True,desc="PDF Pages") pdf_passed = set() batch = featcher.next() while batch is not None: - pdf_index,oimages_batch,images_batch,heights_batch, widths_batch = batch + pdf_index_batch,page_ids_batch, oimages_batch,images_batch,heights_batch, widths_batch = batch #for pdf_index,oimages_batch,images_batch,heights_batch, widths_batch in dataloader: - pdf_index = set([t.item() for t in pdf_index]) + pdf_index = set([t.item() for t in pdf_index_batch]) new_pdf_processed = pdf_index - pdf_passed pdf_passed = pdf_passed|pdf_index for j in tqdm(range(0, len(oimages_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): + pdf_index = pdf_index_batch[j:j+inner_batch_size] + page_ids = page_ids_batch[j:j+inner_batch_size] oimages = oimages_batch[j:j+inner_batch_size] images = images_batch[j:j+inner_batch_size] heights = heights_batch[j:j+inner_batch_size] @@ -220,9 +225,14 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ layout_res = layout_model((images,heights, widths), ignore_catids=[]) mfd_res = mfd_model.predict(oimages, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) - for layout_det, mfd_det, layout_height, layout_width in zip(layout_res, mfd_res, heights, widths): + for pdf_id, page_id, layout_det, mfd_det, layout_height, layout_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): mfd_height,mfd_width = mfd_det.orig_shape + pdf_id = int(pdf_id) + page_id= int(page_id) + pdf_path = dataset.metadata[pdf_id]['path'] this_row = { + "pdf_source":pdf_path, + "page_id": page_id, "layout": {'height':int(layout_height), 'width':int(layout_width), 'layout_dets':clean_layout_dets(layout_det['layout_dets'])}, "mfd":{'height':int(mfd_height), 'width':int(mfd_width), "bbox_cls" :mfd_det.boxes.cls.cpu().numpy().astype('uint8').tolist(), @@ -234,6 +244,7 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ pbar.update(len(new_pdf_processed)) batch = featcher.next() + write_json_to_path with 
open(result_path,'w') as f: for row in data_to_save: f.write(json.dumps(row)+'\n') @@ -252,7 +263,9 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ layout_model = get_layout_model(model_configs) mfd_model = get_batch_YOLO_model(model_configs) - deal_with_one_dataset("part-66210c190659-000035.jsonl", "part-66210c190659-000035.stage_1.jsonl", layout_model, mfd_model, inner_batch_size=4, batch_size=32) + deal_with_one_dataset("debug.jsonl", + "debug.stage_1.jsonl", + layout_model, mfd_model, inner_batch_size=16, batch_size=64,num_workers=16) # dataset = PDFImageDataset("part-66210c190659-000035.jsonl",layout_model.predictor.aug,layout_model.predictor.input_format,mfd_pre_transform=None) # dataloader = DataLoader(dataset, batch_size=8,collate_fn=custom_collate_fn) From 10ccb4020dccf866abf30fe1c821c20d0c6433aa Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Wed, 24 Jul 2024 12:49:07 +0000 Subject: [PATCH 05/74] aligned --- get_batch_yolo.py | 2 +- rough_layout.py | 49 ++++++++++++++++++++++++++++++++--------------- 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/get_batch_yolo.py b/get_batch_yolo.py index 168b699..fcf2dd8 100644 --- a/get_batch_yolo.py +++ b/get_batch_yolo.py @@ -59,7 +59,7 @@ def fastpostprocess(self, preds, img, orig_imgs): results = [] for i, pred in enumerate(preds): orig_img = orig_imgs[i] - pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + #pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) # <-- let's do it outside, since we now feed a normalized batch img_path = self.batch[0][i] results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred)) return results diff --git a/rough_layout.py b/rough_layout.py index 5c2d39f..dbc1685 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -66,7 +66,7 @@ class PDFImageDataset(IterableDataset): client = None #client = build_client() - def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform): + def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform, return_original_image=False): super().__init__() self.metadata= self.smart_read_json(metadata_filepath) #self.pathlist= [t['path'] for t in self.metadata] @@ -75,6 +75,7 @@ def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform): self.aug = aug self.input_format = input_format self.mfd_pre_transform = mfd_pre_transform + self.return_original_image = return_original_image def smart_read_json(self, json_path): if "s3" in json_path and self.client is None: self.client = build_client() if json_path.startswith("s3"): json_path = "opendata:"+ json_path @@ -154,7 +155,10 @@ def __next__(self): image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] mfd_image=self.prepare_for_mfd_model(oimage) #print(self.current_pdf_index, self.current_page_index) - return current_pdf_index, current_page_index, mfd_image, image, height, width + if self.return_original_image: + return current_pdf_index, current_page_index, mfd_image, image, height, width, oimage + else: + return current_pdf_index, current_page_index, mfd_image, image, height, width def prepare_for_mfd_model(self, im:np.ndarray): if self.mfd_pre_transform is None :return im @@ -195,7 +199,7 @@ def clean_layout_dets(layout_dets): }) return rows - +from ultralytics.utils import ops def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32,num_workers=8):
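# `batch_size` above is the DataLoader batch: how many pages the CPU workers prefetch per step.
# `inner_batch_size` is the GPU mini-batch: each prefetched batch is consumed in `inner_batch_size`
# slices before the layout / MFD forward passes (see the `for j in tqdm(range(0, ..., inner_batch_size))` loop below).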
dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt)) @@ -208,38 +212,53 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ pdf_passed = set() batch = featcher.next() while batch is not None: - pdf_index_batch,page_ids_batch, oimages_batch,images_batch,heights_batch, widths_batch = batch - #for pdf_index,oimages_batch,images_batch,heights_batch, widths_batch in dataloader: + pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch = batch + #for pdf_index,mfd_images_batch,images_batch,heights_batch, widths_batch in dataloader: pdf_index = set([t.item() for t in pdf_index_batch]) new_pdf_processed = pdf_index - pdf_passed pdf_passed = pdf_passed|pdf_index - for j in tqdm(range(0, len(oimages_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): + for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): pdf_index = pdf_index_batch[j:j+inner_batch_size] page_ids = page_ids_batch[j:j+inner_batch_size] - oimages = oimages_batch[j:j+inner_batch_size] + mfd_images = mfd_images_batch[j:j+inner_batch_size] images = images_batch[j:j+inner_batch_size] heights = heights_batch[j:j+inner_batch_size] widths = widths_batch[j:j+inner_batch_size] layout_res = layout_model((images,heights, widths), ignore_catids=[]) - mfd_res = mfd_model.predict(oimages, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) + mfd_res = mfd_model.predict(mfd_images, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) - for pdf_id, page_id, layout_det, mfd_det, layout_height, layout_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): + for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): mfd_height,mfd_width = mfd_det.orig_shape pdf_id = int(pdf_id) page_id= int(page_id) + real_input_height = int(real_input_height) + real_input_width = int(real_input_width) pdf_path = dataset.metadata[pdf_id]['path'] + + layout_dets = clean_layout_dets(layout_det['layout_dets']) + for xyxy, conf, cla in zip(mfd_det.boxes.xyxy.cpu(), + mfd_det.boxes.conf.cpu(), + mfd_det.boxes.cls.cpu()): + xyxy = ops.scale_boxes(mfd_images.shape[2:], xyxy, (real_input_height, real_input_width)) + xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] + new_item = { + 'category_id': 13 + int(cla.item()), + 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], + 'score': round(float(conf.item()), 2), + 'latex': '', + } + layout_dets.append(new_item) + + this_row = { "pdf_source":pdf_path, "page_id": page_id, - "layout": {'height':int(layout_height), 'width':int(layout_width), 'layout_dets':clean_layout_dets(layout_det['layout_dets'])}, - "mfd":{'height':int(mfd_height), 'width':int(mfd_width), - "bbox_cls" :mfd_det.boxes.cls.cpu().numpy().astype('uint8').tolist(), - "bbox_conf":mfd_det.boxes.conf.cpu().numpy().astype('float16').tolist(), - "bbox_xyxy":mfd_det.boxes.xyxy.cpu().numpy().astype('uint8').tolist(), - } + 'height':real_input_height, 'width':real_input_width, + "layout_dets":layout_dets, } + data_to_save.append(this_row) pbar.update(len(new_pdf_processed)) batch = featcher.next() From d8dded386c57d0a88078184f40870fa0225b6fcb Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Wed, 24 Jul 2024 14:18:14 +0000 Subject: 
[PATCH 06/74] reformat to match data group format --- get_data_utils.py | 14 ++++++++++++++ rough_layout.py | 43 ++++++++++++++++++++++++++----------------- 2 files changed, 40 insertions(+), 17 deletions(-) diff --git a/get_data_utils.py b/get_data_utils.py index 4eb1e28..ac34cfd 100644 --- a/get_data_utils.py +++ b/get_data_utils.py @@ -71,6 +71,20 @@ def write_json_to_path(data, path, client): os.makedirs(thedir, exist_ok=True) with open(path,'w') as f: json.dump(data, f) +def write_jsonl_to_path(data, path, client): + byte_object = "\n".join([json.dumps(d) for d in data]) + if "s3" in path: + with io.BytesIO(byte_object.encode('utf-8')) as f: + client.put(path, f) + else: + assert not path.startswith('http'), "why do you want to save the file to an online path?" + thedir = os.path.dirname(path) + if thedir: + os.makedirs(thedir, exist_ok=True) + with open(path,'w') as f: + f.write(byte_object) + + def build_client(): #print(f"we will building ceph client...................") diff --git a/rough_layout.py b/rough_layout.py index dbc1685..d9a55b1 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -200,13 +200,14 @@ def clean_layout_dets(layout_dets): return rows from ultralytics.utils import ops +import copy def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32,num_workers=8): dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt)) dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=None, num_workers=num_workers) featcher = DataPrefetcher(dataloader,device='cuda') - data_to_save = [] + data_to_save = {} inner_batch_size = inner_batch_size pbar = tqdm(total=len(dataset.metadata),position=1,leave=True,desc="PDF Pages") pdf_passed = set() @@ -236,7 +237,8 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ real_input_height = int(real_input_height) real_input_width = int(real_input_width) pdf_path = dataset.metadata[pdf_id]['path'] - + if pdf_path not in data_to_save: + data_to_save[pdf_path] = {'height':real_input_height, 'width':real_input_width} layout_dets = clean_layout_dets(layout_det['layout_dets']) for xyxy, conf, cla in zip(mfd_det.boxes.xyxy.cpu(), mfd_det.boxes.conf.cpu(), @@ -250,23 +252,30 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ 'latex': '', } layout_dets.append(new_item) - - - this_row = { - "pdf_source":pdf_path, - "page_id": page_id, - 'height':real_input_height, 'width':real_input_width, - "layout_dets":layout_dets, - } - - data_to_save.append(this_row) + data_to_save[pdf_path][page_id] = layout_dets pbar.update(len(new_pdf_processed)) batch = featcher.next() - write_json_to_path - with open(result_path,'w') as f: - for row in data_to_save: - f.write(json.dumps(row)+'\n') + ### next, we construct the result for each pdf (pdf-wise) and drop the page_id by using list position + pdf_to_metadata = {t['path']:t for t in dataset.metadata} + + new_data_to_save = [] + for pdf_path, layout_dets_per_page in data_to_save.items(): + new_pdf_dict = copy.deepcopy(pdf_to_metadata[pdf_path]) + new_pdf_dict['height'] = layout_dets_per_page.pop('height') + new_pdf_dict['width'] = layout_dets_per_page.pop('width') + pages = [t for t in layout_dets_per_page.keys()] + pages.sort() + new_pdf_dict["doc_layout_result"]=[] + for page_id in range(max(pages)+1): + if page_id not in 
layout_dets_per_page: + print(f"WARNING: page {page_id} of PDF: {pdf_path} failed to parse!!! ") + now_row = {"page_id": page_id, "layout_dets":[]} + else: + now_row = {"page_id": page_id, "layout_dets":layout_dets_per_page[page_id]} + new_pdf_dict["doc_layout_result"].append(now_row) + new_data_to_save.append(new_pdf_dict) + write_jsonl_to_path(new_data_to_save, result_path, dataset.client) if __name__ == "__main__": @@ -284,7 +293,7 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ mfd_model = get_batch_YOLO_model(model_configs) deal_with_one_dataset("debug.jsonl", "debug.stage_1.jsonl", - layout_model, mfd_model, inner_batch_size=16, batch_size=64,num_workers=16) + layout_model, mfd_model, inner_batch_size=3, batch_size=3,num_workers=0) # dataset = PDFImageDataset("part-66210c190659-000035.jsonl",layout_model.predictor.aug,layout_model.predictor.input_format,mfd_pre_transform=None) # dataloader = DataLoader(dataset, batch_size=8,collate_fn=custom_collate_fn) From f655e6bc58b410183df63889eeaba6fceaaf6633 Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Wed, 24 Jul 2024 22:18:51 +0800 Subject: [PATCH 07/74] sync --- pdf_extract.py | 2 +- rough_layout.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pdf_extract.py b/pdf_extract.py index d2b2063..2141466 100644 --- a/pdf_extract.py +++ b/pdf_extract.py @@ -87,7 +87,7 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): mfd_res = mfd_model.predict(image, imgsz=img_size, conf=config.conf_thres, iou=config.iou_thres, verbose=config.verbose)[0] for xyxy, conf, cla in zip(mfd_res.boxes.xyxy.cpu(), mfd_res.boxes.conf.cpu(), - mfd_res.boxes.cls.cpu()): + mfd_res.boxes.cls.cpu() ): xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] new_item = { 'category_id': 13 + int(cla.item()), 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], 'score': round(float(conf.item()), 2), 'latex': '', } diff --git a/rough_layout.py b/rough_layout.py index 5c2d39f..d1081b5 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -15,8 +15,8 @@ from dataaccelerate import DataPrefetcher from torch.utils.data import IterableDataset, get_worker_info -UNIFIED_WIDTH = 1650 -UNIFIED_HEIGHT = 2150 +UNIFIED_WIDTH = 1472 +UNIFIED_HEIGHT = 1888 def pad_image_to_ratio(image, target_ratio): """ Pads the given PIL.Image object to fit the specified width-height ratio @@ -216,11 +216,11 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ for j in tqdm(range(0, len(oimages_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): pdf_index = pdf_index_batch[j:j+inner_batch_size] - page_ids = page_ids_batch[j:j+inner_batch_size] - oimages = oimages_batch[j:j+inner_batch_size] - images = images_batch[j:j+inner_batch_size] - heights = heights_batch[j:j+inner_batch_size] - widths = widths_batch[j:j+inner_batch_size] + page_ids = page_ids_batch[j:j+inner_batch_size] + oimages = oimages_batch[j:j+inner_batch_size] + images = images_batch[j:j+inner_batch_size] + heights = heights_batch[j:j+inner_batch_size] + widths = widths_batch[j:j+inner_batch_size] layout_res = layout_model((images,heights, widths), ignore_catids=[]) mfd_res = mfd_model.predict(oimages, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) From 86819809c45c45d34fcc7c60c08961daa701a12c Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Thu, 25 Jul 2024 04:14:38 +0000 Subject: [PATCH 08/74] let's try another size assignment --- rough_layout.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rough_layout.py b/rough_layout.py index d9a55b1..f544a25 100644 --- a/rough_layout.py +++ 
b/rough_layout.py @@ -15,8 +15,8 @@ from dataaccelerate import DataPrefetcher from torch.utils.data import IterableDataset, get_worker_info -UNIFIED_WIDTH = 1650 -UNIFIED_HEIGHT = 2150 +UNIFIED_WIDTH = 800 +UNIFIED_HEIGHT = 1042 def pad_image_to_ratio(image, target_ratio): """ Pads the given PIL.Image object to fit the specified width-height ratio @@ -228,7 +228,7 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ widths = widths_batch[j:j+inner_batch_size] layout_res = layout_model((images,heights, widths), ignore_catids=[]) - mfd_res = mfd_model.predict(mfd_images, imgsz=img_size, conf=0.3, iou=0.5, verbose=False) + mfd_res = mfd_model.predict(mfd_images, imgsz=(1888,1472), conf=0.3, iou=0.5, verbose=False) for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): mfd_height,mfd_width = mfd_det.orig_shape From 426f17c408dcbd19805f0355c7d1914ed40eab54 Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Mon, 29 Jul 2024 17:14:25 +0800 Subject: [PATCH 09/74] sync --- get_data_utils.py | 25 ++++++++ pdf_extract.py | 94 +++++++++++++++---------------- rough_layout.py | 141 +++++++++++++++++++++++++++------------------- 3 files changed, 155 insertions(+), 105 deletions(-) diff --git a/get_data_utils.py b/get_data_utils.py index ac34cfd..48e0fb5 100644 --- a/get_data_utils.py +++ b/get_data_utils.py @@ -122,6 +122,31 @@ def check_lock_exists(path, client): raise NotImplementedError("please donot use lock lock") return os.path.exists(path) +def check_lock_and_last_start_time(path, client): + if "s3" in path: + raise NotImplementedError("s3 lock not implemented") + elif path.startswith('http'): + assert 'checklocktime' in path, "please use `checklocktime` flag for data path" + response = requests.get(path) + if response.status_code == 200: + content = response.json() + if not content["status"]:return False + return content['start_time'] + else: + return False + else: + raise NotImplementedError("s3 lock not implemented") + +def create_last_start_time_lock(path, client): + if "s3" in path: + raise NotImplementedError("s3 lock not implemented") + elif path.startswith('http'): + assert 'createlocktime' in path, "please use `createlocktime` flag for data path" + response = requests.get(path) + else: + raise NotImplementedError("s3 lock not implemented") + + def read_pdf_from_path(path, client): if "s3" in path: buffer = client.get(path) diff --git a/pdf_extract.py b/pdf_extract.py index 2141466..5dece17 100644 --- a/pdf_extract.py +++ b/pdf_extract.py @@ -158,8 +158,7 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): for idx, image in tqdm(enumerate(img_list)): img_H, img_W = image.shape[0], image.shape[1] layout_res = rough_layout(layout_model, image) - print(image.shape) - raise + latex_filling_list_new, mf_image_list_new = fine_grained_layout(image, layout_res) latex_filling_list.extend(latex_filling_list_new) mf_image_list.extend(mf_image_list_new) @@ -170,52 +169,53 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): ) doc_layout_result.append(layout_res) # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== - # # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== - # a = time.time() - # dataset = MathDataset(mf_image_list, transform=mfr_transform) - # dataloader = DataLoader(dataset, batch_size=128, num_workers=32) - # mfr_res = [] - # for imgs 
in dataloader: - # imgs = imgs.to(device) - # output = mfr_model.generate({'image': imgs}) - # mfr_res.extend(output['pred_str']) - # for res, latex in zip(latex_filling_list, mfr_res): - # res['latex'] = latex_rm_whitespace(latex) - # b = time.time() - # print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) + # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== + a = time.time() + dataset = MathDataset(mf_image_list, transform=mfr_transform) + dataloader = DataLoader(dataset, batch_size=128, num_workers=32) + mfr_res = [] + for imgs in dataloader: + imgs = imgs.to(device) + output = mfr_model.generate({'image': imgs}) + mfr_res.extend(output['pred_str']) + for res, latex in zip(latex_filling_list, mfr_res): + res['latex'] = latex_rm_whitespace(latex) + b = time.time() + print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) - # # ================ ocr识别 ==================================== - # # - # for idx, image in enumerate(img_list): - # pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - # single_page_res = doc_layout_result[idx]['layout_dets'] - # single_page_mfdetrec_res = [] - # for res in single_page_res: - # if int(res['category_id']) in [13, 14]: - # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - # single_page_mfdetrec_res.append({ - # "bbox": [xmin, ymin, xmax, ymax], - # }) - # for res in single_page_res: - # if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 - # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - # crop_box = [xmin, ymin, xmax, ymax] - # cropped_img = Image.new('RGB', pil_img.size, 'white') - # cropped_img.paste(pil_img.crop(crop_box), crop_box) - # cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) - # ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] - # if ocr_res: - # for box_ocr_res in ocr_res: - # p1, p2, p3, p4 = box_ocr_res[0] - # text, score = box_ocr_res[1] - # doc_layout_result[idx]['layout_dets'].append({ - # 'category_id': 15, - # 'poly': p1 + p2 + p3 + p4, - # 'score': round(score, 2), - # 'text': text, - # }) + # ================ ocr识别 ==================================== + # + for idx, image in enumerate(img_list): + pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) + single_page_res = doc_layout_result[idx]['layout_dets'] + single_page_mfdetrec_res = [] + for res in single_page_res: + if int(res['category_id']) in [13, 14]: + xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + single_page_mfdetrec_res.append({ + "bbox": [xmin, ymin, xmax, ymax], + }) + for res in single_page_res: + if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 + xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + crop_box = [xmin, ymin, xmax, ymax] + cropped_img = Image.new('RGB', pil_img.size, 'white') + cropped_img.paste(pil_img.crop(crop_box), crop_box) + cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) + print(cropped_img.shape) + ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] + if ocr_res: + for box_ocr_res in ocr_res: + p1, p2, p3, p4 = box_ocr_res[0] + text, score = box_ocr_res[1] + doc_layout_result[idx]['layout_dets'].append({ + 'category_id': 15, + 'poly': p1 + p2 + p3 + p4, + 'score': 
round(score, 2), + 'text': text, + }) # output_dir = args.output # os.makedirs(output_dir, exist_ok=True) diff --git a/rough_layout.py b/rough_layout.py index f544a25..3362bdf 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -131,7 +131,24 @@ def current_doc(self): if len(self.last_read_pdf_buffer)==0:return None return list(self.last_read_pdf_buffer.values())[0] - def __next__(self): + def read_data_based_on_current_state(self): + page = self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) + oimage = process_pdf_page_to_image(page, self.dpi) + original_image = oimage[:, :, ::-1] if self.input_format == "RGB" else oimage + height, width = original_image.shape[:2] + layout_image = self.aug.get_transform(original_image).apply_image(original_image) + layout_image = torch.as_tensor(layout_image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] + ## lets make sure the image has correct size + if layout_image.size(1) < 1042: + layout_image = torch.nn.functional.pad(layout_image, (0, 0, 0, 1042-layout_image.size(1))) + mfd_image=self.prepare_for_mfd_model(oimage) + #print(self.current_pdf_index, self.current_page_index) + if self.return_original_image: + return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width, oimage + else: + return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width + + def check_should_skip(self): if self.current_pdf_index >= len(self.metadata): raise StopIteration @@ -144,22 +161,25 @@ def __next__(self): if self.current_pdf_index >= len(self.metadata): raise StopIteration - page = self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) - current_page_index = self.current_page_index - current_pdf_index = self.current_pdf_index - self.current_page_index += 1 - oimage = process_pdf_page_to_image(page, self.dpi) - original_image = oimage[:, :, ::-1] if self.input_format == "RGB" else oimage - height, width = original_image.shape[:2] - image = self.aug.get_transform(original_image).apply_image(original_image) - image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] - mfd_image=self.prepare_for_mfd_model(oimage) - #print(self.current_pdf_index, self.current_page_index) - if self.return_original_image: - return current_pdf_index, current_page_index, mfd_image, image, height, width, oimage - else: - return current_pdf_index, current_page_index, mfd_image, image, height, width + def __next__(self): + fail_times = 0 + try: + self.check_should_skip() + output = self.read_data_based_on_current_state() + self.current_page_index += 1 + fail_times = 0 + except StopIteration: + raise StopIteration + except: + fail_times +=1 + if fail_times>10: + raise StopIteration + return output + + + + def prepare_for_mfd_model(self, im:np.ndarray): if self.mfd_pre_transform is None :return im assert im.ndim==3 @@ -209,51 +229,55 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ featcher = DataPrefetcher(dataloader,device='cuda') data_to_save = {} inner_batch_size = inner_batch_size - pbar = tqdm(total=len(dataset.metadata),position=1,leave=True,desc="PDF Pages") + pbar = tqdm(total=len(dataset.metadata),position=2,desc="PDF Pages",leave=False) pdf_passed = set() batch = featcher.next() while batch is not None: - pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch = batch - #for pdf_index,mfd_images_batch,images_batch,heights_batch, widths_batch in dataloader: - pdf_index = 
set([t.item() for t in pdf_index_batch]) - new_pdf_processed = pdf_index - pdf_passed - pdf_passed = pdf_passed|pdf_index - - for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): - pdf_index = pdf_index_batch[j:j+inner_batch_size] - page_ids = page_ids_batch[j:j+inner_batch_size] - mfd_images = mfd_images_batch[j:j+inner_batch_size] - images = images_batch[j:j+inner_batch_size] - heights = heights_batch[j:j+inner_batch_size] - widths = widths_batch[j:j+inner_batch_size] - - layout_res = layout_model((images,heights, widths), ignore_catids=[]) - mfd_res = mfd_model.predict(mfd_images, imgsz=(1888,1472), conf=0.3, iou=0.5, verbose=False) + try: + pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch = batch + #for pdf_index,mfd_images_batch,images_batch,heights_batch, widths_batch in dataloader: + pdf_index = set([t.item() for t in pdf_index_batch]) + new_pdf_processed = pdf_index - pdf_passed + pdf_passed = pdf_passed|pdf_index - for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): - mfd_height,mfd_width = mfd_det.orig_shape - pdf_id = int(pdf_id) - page_id= int(page_id) - real_input_height = int(real_input_height) - real_input_width = int(real_input_width) - pdf_path = dataset.metadata[pdf_id]['path'] - if pdf_path not in data_to_save: - data_to_save[pdf_path] = {'height':real_input_height, 'width':real_input_width} - layout_dets = clean_layout_dets(layout_det['layout_dets']) - for xyxy, conf, cla in zip(mfd_det.boxes.xyxy.cpu(), - mfd_det.boxes.conf.cpu(), - mfd_det.boxes.cls.cpu()): - xyxy = ops.scale_boxes(mfd_images.shape[2:], xyxy, (real_input_height, real_input_width)) - xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] - new_item = { - 'category_id': 13 + int(cla.item()), - 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], - 'score': round(float(conf.item()), 2), - 'latex': '', - } - layout_dets.append(new_item) - data_to_save[pdf_path][page_id] = layout_dets - pbar.update(len(new_pdf_processed)) + for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=3,leave=False,desc="mini-Batch"): + pdf_index = pdf_index_batch[j:j+inner_batch_size] + page_ids = page_ids_batch[j:j+inner_batch_size] + mfd_images = mfd_images_batch[j:j+inner_batch_size] + images = images_batch[j:j+inner_batch_size] + heights = heights_batch[j:j+inner_batch_size] + widths = widths_batch[j:j+inner_batch_size] + + layout_res = layout_model((images,heights, widths), ignore_catids=[]) + mfd_res = mfd_model.predict(mfd_images, imgsz=(1888,1472), conf=0.3, iou=0.5, verbose=False) + + for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): + mfd_height,mfd_width = mfd_det.orig_shape + pdf_id = int(pdf_id) + page_id= int(page_id) + real_input_height = int(real_input_height) + real_input_width = int(real_input_width) + pdf_path = dataset.metadata[pdf_id]['path'] + if pdf_path not in data_to_save: + data_to_save[pdf_path] = {'height':real_input_height, 'width':real_input_width} + layout_dets = clean_layout_dets(layout_det['layout_dets']) + for xyxy, conf, cla in zip(mfd_det.boxes.xyxy.cpu(), + mfd_det.boxes.conf.cpu(), + mfd_det.boxes.cls.cpu()): + xyxy = ops.scale_boxes(mfd_images.shape[2:], xyxy, (real_input_height, real_input_width)) + xmin, ymin, xmax, ymax = [int(p.item()) for p in xyxy] + new_item = { + 'category_id': 13 + 
int(cla.item()), + 'poly': [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax], + 'score': round(float(conf.item()), 2), + 'latex': '', + } + layout_dets.append(new_item) + data_to_save[pdf_path][page_id] = layout_dets + pbar.update(len(new_pdf_processed)) + except: + print("ERROR: Failed to process batch") + batch = featcher.next() ### next, we construct the result for each pdf (pdf-wise) and drop the page_id by using list position @@ -270,11 +294,12 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ for page_id in range(max(pages)+1): if page_id not in layout_dets_per_page: print(f"WARNING: page {page_id} of PDF: {pdf_path} failed to parse!!! ") - now_row = {"page_id": page_id, "layout_dets":[]} + now_row = {"page_id": page_id, "status": "fail", "layout_dets":[]} else: now_row = {"page_id": page_id, "layout_dets":layout_dets_per_page[page_id]} new_pdf_dict["doc_layout_result"].append(now_row) new_data_to_save.append(new_pdf_dict) + if dataset.client is None:dataset.client=build_client() write_jsonl_to_path(new_data_to_save, result_path, dataset.client) if __name__ == "__main__": From d50097aafcf3ff2c96e6f6e1af26a6a498172192 Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Mon, 29 Jul 2024 09:15:11 +0000 Subject: [PATCH 10/74] what's new? --- modules/self_modify.py | 66 ++++++++++++- pdf_extract.py | 210 ++++++++++++++++++++--------------- rough_layout.py | 24 +++-- 3 files changed, 185 insertions(+), 115 deletions(-) diff --git a/modules/self_modify.py b/modules/self_modify.py index 1f830b8..2868964 100644 --- a/modules/self_modify.py +++ b/modules/self_modify.py @@ -11,6 +11,7 @@ from ppocr.utils.logging import get_logger from ppocr.utils.utility import check_and_read, alpha_to_color, binarize_img from tools.infer.utility import draw_ocr_box_txt, get_rotate_crop_image, get_minarea_rect_crop +from .batch_text_detector import BatchTextDetector logger = get_logger() def img_decode(content: bytes): @@ -120,6 +121,67 @@ def update_det_boxes(dt_boxes, mfdetrec_res): return new_dt_boxes class ModifiedPaddleOCR(PaddleOCR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.batch_det_model = BatchTextDetector() + + def batch_detect(self, img_list, mfd_res_list, ori_im_list, cls=True): + time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0} + start = time.time() + + dt_boxes_list, elapse = self.batch_det_model(img_list) + time_dict['det'] = elapse + + dt_boxes_list_new = [] + for dt_boxes, mfd_res in zip(dt_boxes_list, mfd_res_list): + if mfd_res: + bef = time.time() + dt_boxes = update_det_boxes(dt_boxes, mfd_res) + aft = time.time() + logger.debug("split text box by formula, new dt_boxes num : {}, elapsed : {}".format(len(dt_boxes), aft-bef)) + dt_boxes_list_new.append(dt_boxes) + dt_boxes_list = dt_boxes_list_new + + img_crop_list = [] + boxes_partition= [0] + for dt_boxes, ori_im in zip(dt_boxes_list, ori_im_list): + for bno in range(len(dt_boxes)): + tmp_box = copy.deepcopy(dt_boxes[bno]) + if self.args.det_box_type == "quad": + img_crop = get_rotate_crop_image(ori_im, tmp_box) + else: + img_crop = get_minarea_rect_crop(ori_im, tmp_box) + img_crop_list.append(img_crop) + boxes_partition.append(len(img_crop_list)) + + if self.use_angle_cls and cls: + img_crop_list, angle_list, elapse = self.text_classifier(img_crop_list) + time_dict['cls'] = elapse + logger.debug("cls num : {}, elapsed : {}".format(len(img_crop_list), elapse)) + + rec_res, elapse = self.text_recognizer(img_crop_list) + time_dict['rec'] = elapse + logger.debug("rec_res num : {}, elapsed : {}".format(len(rec_res), elapse))
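# `boxes_partition` holds cumulative offsets into `img_crop_list`, one entry per page:
# the crops for page i occupy img_crop_list[boxes_partition[i]:boxes_partition[i+1]],
# which is how the flat `rec_res` list is sliced back into per-page results below.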
{}".format(len(rec_res), elapse)) + # if self.args.save_crop_res:self.draw_crop_rec_res(self.args.crop_res_save_dir, img_crop_list,rec_res) + + filter_boxes_list =[] + filter_rec_res_list=[] + for partition,(dt_boxes, ori_im) in enumerate(zip(dt_boxes_list, ori_im_list)): + filter_boxes, filter_rec_res = [], [] + rec_res = rec_res[boxes_partition[partition]:boxes_partition[partition+1]] + for box, rec_result in zip(dt_boxes, rec_res): + text, score = rec_result + if score >= self.drop_score: + filter_boxes.append(box) + filter_rec_res.append(rec_result) + filter_boxes_list.append(filter_boxes) + filter_rec_res_list.append(filter_rec_res) + + end = time.time() + time_dict['all'] = end - start + return filter_boxes, filter_rec_res, time_dict + + def ocr(self, img, det=True, rec=True, cls=True, bin=False, inv=False, mfd_res=None, alpha_color=(255, 255, 255)): """ OCR with PaddleOCR @@ -235,6 +297,7 @@ def __call__(self, img, cls=True, mfd_res=None): else: img_crop = get_minarea_rect_crop(ori_im, tmp_box) img_crop_list.append(img_crop) + if self.use_angle_cls and cls: img_crop_list, angle_list, elapse = self.text_classifier( img_crop_list) @@ -244,8 +307,7 @@ def __call__(self, img, cls=True, mfd_res=None): rec_res, elapse = self.text_recognizer(img_crop_list) time_dict['rec'] = elapse - logger.debug("rec_res num : {}, elapsed : {}".format( - len(rec_res), elapse)) + logger.debug("rec_res num : {}, elapsed : {}".format(len(rec_res), elapse)) if self.args.save_crop_res: self.draw_crop_rec_res(self.args.crop_res_save_dir, img_crop_list, rec_res) diff --git a/pdf_extract.py b/pdf_extract.py index 2141466..d9c9de7 100644 --- a/pdf_extract.py +++ b/pdf_extract.py @@ -81,7 +81,7 @@ class FineGrainedConfig: iou_thres: float = 0.5 verbose: bool = False -def fine_grained_layout(image, layout_res, config:FineGrainedConfig): +def fine_grained_layout(mfd_model,image, layout_res, config:FineGrainedConfig): latex_filling_list = [] mf_image_list = [] mfd_res = mfd_model.predict(image, imgsz=img_size, conf=config.conf_thres, iou=config.iou_thres, verbose=config.verbose)[0] @@ -142,6 +142,7 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): else: all_pdfs = [args.pdf] print("total files:", len(all_pdfs)) + fineconfig = FineGrainedConfig() for idx, single_pdf in enumerate(all_pdfs): try: img_list = load_pdf_fitz(single_pdf, dpi=dpi) @@ -158,9 +159,8 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): for idx, image in tqdm(enumerate(img_list)): img_H, img_W = image.shape[0], image.shape[1] layout_res = rough_layout(layout_model, image) - print(image.shape) - raise - latex_filling_list_new, mf_image_list_new = fine_grained_layout(image, layout_res) + + latex_filling_list_new, mf_image_list_new = fine_grained_layout(image, layout_res,fineconfig) latex_filling_list.extend(latex_filling_list_new) mf_image_list.extend(mf_image_list_new) layout_res['page_info'] = dict( @@ -169,110 +169,110 @@ def fine_grained_layout(image, layout_res, config:FineGrainedConfig): width = img_W ) doc_layout_result.append(layout_res) - # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== - # # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== - # a = time.time() - # dataset = MathDataset(mf_image_list, transform=mfr_transform) - # dataloader = DataLoader(dataset, batch_size=128, num_workers=32) - # mfr_res = [] - # for imgs in dataloader: - # imgs = imgs.to(device) - # output = 
mfr_model.generate({'image': imgs}) - # mfr_res.extend(output['pred_str']) - # for res, latex in zip(latex_filling_list, mfr_res): - # res['latex'] = latex_rm_whitespace(latex) - # b = time.time() - # print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) + # ================ 公式识别,因为识别速度较慢,为了提速,把单个pdf的所有公式裁剪完,一起批量做识别。 ==================================== + a = time.time() + dataset = MathDataset(mf_image_list, transform=mfr_transform) + dataloader = DataLoader(dataset, batch_size=128, num_workers=32) + mfr_res = [] + for imgs in dataloader: + imgs = imgs.to(device) + output = mfr_model.generate({'image': imgs}) + mfr_res.extend(output['pred_str']) + for res, latex in zip(latex_filling_list, mfr_res): + res['latex'] = latex_rm_whitespace(latex) + b = time.time() + print("formula nums:", len(mf_image_list), "mfr time:", round(b-a, 2)) - # # ================ ocr识别 ==================================== - # # - # for idx, image in enumerate(img_list): - # pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - # single_page_res = doc_layout_result[idx]['layout_dets'] - # single_page_mfdetrec_res = [] - # for res in single_page_res: - # if int(res['category_id']) in [13, 14]: - # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - # single_page_mfdetrec_res.append({ - # "bbox": [xmin, ymin, xmax, ymax], - # }) - # for res in single_page_res: - # if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 - # xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) - # xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) - # crop_box = [xmin, ymin, xmax, ymax] - # cropped_img = Image.new('RGB', pil_img.size, 'white') - # cropped_img.paste(pil_img.crop(crop_box), crop_box) - # cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) - # ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] - # if ocr_res: - # for box_ocr_res in ocr_res: - # p1, p2, p3, p4 = box_ocr_res[0] - # text, score = box_ocr_res[1] - # doc_layout_result[idx]['layout_dets'].append({ - # 'category_id': 15, - # 'poly': p1 + p2 + p3 + p4, - # 'score': round(score, 2), - # 'text': text, - # }) + # ================ ocr识别 ==================================== + # + for idx, image in enumerate(img_list): + pil_img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) + single_page_res = doc_layout_result[idx]['layout_dets'] + single_page_mfdetrec_res = [] + for res in single_page_res: + if int(res['category_id']) in [13, 14]: + xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + single_page_mfdetrec_res.append({ + "bbox": [xmin, ymin, xmax, ymax], + }) + for res in single_page_res: + if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: #需要进行ocr的类别 + xmin, ymin = int(res['poly'][0]), int(res['poly'][1]) + xmax, ymax = int(res['poly'][4]), int(res['poly'][5]) + crop_box = [xmin, ymin, xmax, ymax] + cropped_img = Image.new('RGB', pil_img.size, 'white') + cropped_img.paste(pil_img.crop(crop_box), crop_box) + cropped_img = cv2.cvtColor(np.asarray(cropped_img), cv2.COLOR_RGB2BGR) + print(cropped_img.shape) + ocr_res = ocr_model.ocr(cropped_img, mfd_res=single_page_mfdetrec_res)[0] + if ocr_res: + for box_ocr_res in ocr_res: + p1, p2, p3, p4 = box_ocr_res[0] + text, score = box_ocr_res[1] + doc_layout_result[idx]['layout_dets'].append({ + 'category_id': 15, + 'poly': p1 + p2 + p3 + p4, + 'score': round(score, 2), + 'text': text, + }) - # output_dir = 
args.output - # os.makedirs(output_dir, exist_ok=True) - # basename = os.path.basename(single_pdf)[0:-4] - # with open(os.path.join(output_dir, f'{basename}.json'), 'w') as f: - # json.dump(doc_layout_result, f) + output_dir = args.output + os.makedirs(output_dir, exist_ok=True) + basename = os.path.basename(single_pdf)[0:-4] + with open(os.path.join(output_dir, f'{basename}.json'), 'w') as f: + json.dump(doc_layout_result, f) - # if args.vis: - # color_palette = [ - # (255,64,255),(255,255,0),(0,255,255),(255,215,135),(215,0,95),(100,0,48),(0,175,0),(95,0,95),(175,95,0),(95,95,0), - # (95,95,255),(95,175,135),(215,95,0),(0,0,255),(0,255,0),(255,0,0),(0,95,215),(0,0,0),(0,0,0),(0,0,0) - # ] - # id2names = ["title", "plain_text", "abandon", "figure", "figure_caption", "table", "table_caption", "table_footnote", - # "isolate_formula", "formula_caption", " ", " ", " ", "inline_formula", "isolated_formula", "ocr_text"] - # vis_pdf_result = [] - # for idx, image in enumerate(img_list): - # single_page_res = doc_layout_result[idx]['layout_dets'] - # vis_img = Image.new('RGB', Image.fromarray(image).size, 'white') if args.render else Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - # draw = ImageDraw.Draw(vis_img) - # for res in single_page_res: - # label = int(res['category_id']) - # if label > 15: # 筛选要可视化的类别 - # continue - # label_name = id2names[label] - # x_min, y_min = int(res['poly'][0]), int(res['poly'][1]) - # x_max, y_max = int(res['poly'][4]), int(res['poly'][5]) - # if args.render and label in [13, 14, 15]: - # try: - # if label in [13, 14]: # 渲染公式 - # window_img = tex2pil(res['latex'])[0] - # else: - # if True: # 渲染中文 - # window_img = zhtext2pil(res['text']) - # else: # 渲染英文 - # window_img = tex2pil([res['text']], tex_type="text")[0] - # ratio = min((x_max - x_min) / window_img.width, (y_max - y_min) / window_img.height) - 0.05 - # window_img = window_img.resize((int(window_img.width * ratio), int(window_img.height * ratio))) - # vis_img.paste(window_img, (int(x_min + (x_max-x_min-window_img.width) / 2), int(y_min + (y_max-y_min-window_img.height) / 2))) - # except Exception as e: - # print(f"got exception on {text}, error info: {e}") - # draw.rectangle([x_min, y_min, x_max, y_max], fill=None, outline=color_palette[label], width=1) - # fontText = ImageFont.truetype("assets/fonts/simhei.ttf", 15, encoding="utf-8") - # draw.text((x_min, y_min), label_name, color_palette[label], font=fontText) + if args.vis: + color_palette = [ + (255,64,255),(255,255,0),(0,255,255),(255,215,135),(215,0,95),(100,0,48),(0,175,0),(95,0,95),(175,95,0),(95,95,0), + (95,95,255),(95,175,135),(215,95,0),(0,0,255),(0,255,0),(255,0,0),(0,95,215),(0,0,0),(0,0,0),(0,0,0) + ] + id2names = ["title", "plain_text", "abandon", "figure", "figure_caption", "table", "table_caption", "table_footnote", + "isolate_formula", "formula_caption", " ", " ", " ", "inline_formula", "isolated_formula", "ocr_text"] + vis_pdf_result = [] + for idx, image in enumerate(img_list): + single_page_res = doc_layout_result[idx]['layout_dets'] + vis_img = Image.new('RGB', Image.fromarray(image).size, 'white') if args.render else Image.fromarray(cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) + draw = ImageDraw.Draw(vis_img) + for res in single_page_res: + label = int(res['category_id']) + if label > 15: # 筛选要可视化的类别 + continue + label_name = id2names[label] + x_min, y_min = int(res['poly'][0]), int(res['poly'][1]) + x_max, y_max = int(res['poly'][4]), int(res['poly'][5]) + if args.render and label in [13, 14, 15]: + try: + if label in [13, 
14]: # 渲染公式 + window_img = tex2pil(res['latex'])[0] + else: + if True: # 渲染中文 + window_img = zhtext2pil(res['text']) + else: # 渲染英文 + window_img = tex2pil([res['text']], tex_type="text")[0] + ratio = min((x_max - x_min) / window_img.width, (y_max - y_min) / window_img.height) - 0.05 + window_img = window_img.resize((int(window_img.width * ratio), int(window_img.height * ratio))) + vis_img.paste(window_img, (int(x_min + (x_max-x_min-window_img.width) / 2), int(y_min + (y_max-y_min-window_img.height) / 2))) + except Exception as e: + print(f"got exception on {text}, error info: {e}") + draw.rectangle([x_min, y_min, x_max, y_max], fill=None, outline=color_palette[label], width=1) + fontText = ImageFont.truetype("assets/fonts/simhei.ttf", 15, encoding="utf-8") + draw.text((x_min, y_min), label_name, color_palette[label], font=fontText) + width, height = vis_img.size + width, height = int(0.75*width), int(0.75*height) + vis_img = vis_img.resize((width, height)) + vis_pdf_result.append(vis_img) + first_page = vis_pdf_result.pop(0) + first_page.save(os.path.join(output_dir, f'{basename}.pdf'), 'PDF', resolution=100, save_all=True, append_images=vis_pdf_result) + try: + shutil.rmtree('./temp') + except: + pass + now = datetime.datetime.now(tz) + end = time.time() + print(now.strftime('%Y-%m-%d %H:%M:%S')) + print('Finished! time cost:', int(end-start), 's') \ No newline at end of file diff --git a/rough_layout.py b/rough_layout.py index f544a25..4f0bdcb 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -220,16 +220,24 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ pdf_passed = pdf_passed|pdf_index for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=2,leave=False,desc="mini-Batch"): - pdf_index = pdf_index_batch[j:j+inner_batch_size] - page_ids = page_ids_batch[j:j+inner_batch_size] + pdf_index = pdf_index_batch[j:j+inner_batch_size] + page_ids = page_ids_batch[j:j+inner_batch_size] mfd_images = mfd_images_batch[j:j+inner_batch_size] - images = images_batch[j:j+inner_batch_size] - heights = heights_batch[j:j+inner_batch_size] - widths = widths_batch[j:j+inner_batch_size] - - layout_res = layout_model((images,heights, widths), ignore_catids=[]) - mfd_res = mfd_model.predict(mfd_images, imgsz=(1888,1472), conf=0.3, iou=0.5, verbose=False) + images = images_batch[j:j+inner_batch_size] + heights = heights_batch[j:j+inner_batch_size] + widths = widths_batch[j:j+inner_batch_size] + + ### the dataset returns the normalized image data in `images`. + ### if you set return_original_image=True, it also appends the original oimage to each batch. + ### currently, the oimage is padded and resized until it matches the layout model ratio, which is UNIFIED_WIDTH / UNIFIED_HEIGHT = 800/1042 + ### please note that the oimage always holds the 800/1042 ratio: we white-pad on the right and bottom, so pixel bboxes are not affected. + ### for the mfd model, the input is always processed into the mfd_model.predictor.args.imgsz size, such as (1888,1472) + ### later on, the mfd result is mapped back via `ops.scale_boxes` to the original image size, i.e. the oimage size, which must keep the UNIFIED_WIDTH / UNIFIED_HEIGHT = 800/1042 ratio (note this may not be the original pdf size)
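# A minimal, self-contained sketch of the box rescaling described in the comments above.
# It assumes ultralytics' ops.scale_boxes(img1_shape, boxes, img0_shape), which undoes the
# letterbox resize/padding; the concrete sizes below are illustrative only.
import torch
from ultralytics.utils import ops

mfd_input_hw = (1888, 1472)  # (H, W) actually fed to the MFD model
page_hw = (1042, 800)        # (H, W) of the padded page image (UNIFIED sizes)
xyxy = torch.tensor([[120.0, 300.0, 480.0, 360.0]])  # one detected formula box, xyxy
xyxy_page = ops.scale_boxes(mfd_input_hw, xyxy.clone(), page_hw)  # back to page pixels
xmin, ymin, xmax, ymax = [int(v) for v in xyxy_page[0]]
poly = [xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]  # 4-corner poly as stored in layout_dets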
+ layout_res = layout_model((images, heights, widths), ignore_catids=[]) + mfd_res = mfd_model.predict(mfd_images, imgsz=mfd_model.predictor.args.imgsz, conf=0.3, iou=0.5, verbose=False) + + for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): mfd_height,mfd_width = mfd_det.orig_shape pdf_id = int(pdf_id) From b40a55884481370d0754a0fa60afabfd846e47a7 Mon Sep 17 00:00:00 2001 From: zhangtianning Date: Tue, 30 Jul 2024 07:03:10 +0000 Subject: [PATCH 11/74] add ocr batch processing --- dataaccelerate.py | 2 +- get_batch_layout_model.py | 71 +------------- rough_layout.py | 202 ++++++++++++++++++++--------------- 3 files changed, 130 insertions(+), 145 deletions(-) diff --git a/dataaccelerate.py b/dataaccelerate.py index c7f74dc..a9ea5d7 100644 --- a/dataaccelerate.py +++ b/dataaccelerate.py @@ -11,7 +11,7 @@ def sendall2gpu(listinlist,device): elif isinstance(listinlist, (dict)): return dict([(key,sendall2gpu(val,device)) for key,val in listinlist.items()]) elif isinstance(listinlist, np.ndarray): - return torch.from_numpy(listinlist).to(device=device, non_blocking=True) + return listinlist else: return listinlist.to(device=device, non_blocking=True) try: diff --git a/get_batch_layout_model.py b/get_batch_layout_model.py index 440241e..2e22237 100644 --- a/get_batch_layout_model.py +++ b/get_batch_layout_model.py @@ -1,76 +1,7 @@ from detectron2.utils.logger import setup_logger setup_logger() from modules.layoutlmv3.model_init import * - -import time, math -class _DummyTimer: - """A dummy timer that does nothing.""" - - def __enter__(self): - return self - - def __exit__(self, *args): - pass - -class _Timer: - """Timer.""" - - def __init__(self, name): - self.name = name - self.count = 0 - self.mean = 0.0 - self.sum_squares = 0.0 - self.start_time = None - - def __enter__(self): - self.start_time = time.time() - - def __exit__(self, exc_type, exc_value, exc_traceback): - elapsed_time = time.time() - self.start_time - self.update(elapsed_time) - self.start_time = None - - def update(self, elapsed_time): - self.count += 1 - delta = elapsed_time - self.mean - self.mean += delta / self.count - delta2 = elapsed_time - self.mean - self.sum_squares += delta * delta2 - - def mean_elapsed(self): - return self.mean - - def std_elapsed(self): - if self.count > 1: - variance = self.sum_squares / (self.count - 1) - return math.sqrt(variance) - else: - return 0.0 - -class Timers: - """Group of timers.""" - - def __init__(self, activate=False): - self.timers = {} - self.activate = activate - def __call__(self, name): - if not self.activate:return _DummyTimer() - if name not in self.timers: - self.timers[name] = _Timer(name) - return self.timers[name] - - def log(self, names=None, normalizer=1.0): - """Log a group of timers.""" - assert normalizer > 0.0 - if names is None: - names = self.timers.keys() - print("Timer Results:") - for name in names: - mean_elapsed = self.timers[name].mean_elapsed() * 1000.0 / normalizer - std_elapsed = self.timers[name].std_elapsed() * 1000.0 / normalizer - space_num = " "*name.count('/') - print(f"{space_num}{name}: 
{mean_elapsed:.2f}±{std_elapsed:.2f} ms") - +from utils import Timers def inference( self, batched_inputs: List[Dict[str, torch.Tensor]], diff --git a/rough_layout.py b/rough_layout.py index 3362bdf..938b4c9 100644 --- a/rough_layout.py +++ b/rough_layout.py @@ -15,9 +15,9 @@ from dataaccelerate import DataPrefetcher from torch.utils.data import IterableDataset, get_worker_info -UNIFIED_WIDTH = 800 -UNIFIED_HEIGHT = 1042 -def pad_image_to_ratio(image, target_ratio): +UNIFIED_WIDTH = 1472 # lets always make the oimage in such size +UNIFIED_HEIGHT = 1920 # lets always make the oimage in such size +def pad_image_to_ratio(image, output_width = UNIFIED_WIDTH,output_height=UNIFIED_HEIGHT, ): """ Pads the given PIL.Image object to fit the specified width-height ratio by adding padding only to the bottom and right sides. @@ -27,25 +27,24 @@ def pad_image_to_ratio(image, target_ratio): :return: New PIL.Image object with the padding applied """ # Original dimensions - orig_width, orig_height = image.size - orig_ratio = orig_width / orig_height - - # Calculate target dimensions - if orig_ratio < target_ratio: - # Taller than target ratio, pad width - target_height = orig_height - target_width = int(target_height * target_ratio) - else: - # Wider than target ratio, pad height - target_width = orig_width - target_height = int(target_width / target_ratio) + input_width, input_height = image.size + height = min(input_height, output_height) + width = min(input_width, output_width) - # Calculate padding needed - pad_right = target_width - orig_width - pad_bottom = target_height - orig_height + if output_height == input_height and output_width == input_width: + return image + if input_height / output_height > input_width / output_width: + # Resize to match height, width will be smaller than output_width + height = output_height + width = int(input_width * output_height / input_height) + else: + # Resize to match width, height will be smaller than output_height + width = output_width + height = int(input_height * output_width / input_width) + image= image.resize((width, height), resample=3) # Create new image with target dimensions and a white background - new_image = Image.new("RGB", (target_width, target_height), (255, 255, 255)) + new_image = Image.new("RGB", (output_width, output_height), (255, 255, 255)) new_image.paste(image, (0, 0)) return new_image @@ -58,7 +57,7 @@ def process_pdf_page_to_image(page, dpi): else: image = Image.frombytes('RGB', (pix.width, pix.height), pix.samples) - image = pad_image_to_ratio(image, UNIFIED_WIDTH / UNIFIED_HEIGHT) + image = pad_image_to_ratio(image, output_width = UNIFIED_WIDTH,output_height=UNIFIED_HEIGHT) image = np.array(image)[:,:,::-1] return image.copy() @@ -66,7 +65,9 @@ def process_pdf_page_to_image(page, dpi): class PDFImageDataset(IterableDataset): client = None #client = build_client() - def __init__(self, metadata_filepath, aug, input_format, mfd_pre_transform, return_original_image=False): + def __init__(self, metadata_filepath, aug, input_format, + mfd_pre_transform, + return_original_image=False): super().__init__() self.metadata= self.smart_read_json(metadata_filepath) #self.pathlist= [t['path'] for t in self.metadata] @@ -131,22 +132,23 @@ def current_doc(self): if len(self.last_read_pdf_buffer)==0:return None return list(self.last_read_pdf_buffer.values())[0] + def prepare_for_text_det(self,image): + return self.text_det_transform(image)[0] + def read_data_based_on_current_state(self): page = 
self.get_pdf_by_index(self.current_pdf_index).load_page(self.current_page_index) oimage = process_pdf_page_to_image(page, self.dpi) original_image = oimage[:, :, ::-1] if self.input_format == "RGB" else oimage height, width = original_image.shape[:2] layout_image = self.aug.get_transform(original_image).apply_image(original_image) - layout_image = torch.as_tensor(layout_image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] + layout_image = torch.as_tensor(layout_image.astype("float32").transpose(2, 0, 1))[:,:1042,:800] ## it will be 1043x800 --> 1042:800 ## lets make sure the image has correct size - if layout_image.size(1) < 1042: - layout_image = torch.nn.functional.pad(layout_image, (0, 0, 0, 1042-layout_image.size(1))) + # if layout_image.size(1) < 1042: + # layout_image = torch.nn.functional.pad(layout_image, (0, 0, 0, 1042-layout_image.size(1))) mfd_image=self.prepare_for_mfd_model(oimage) + #print(self.current_pdf_index, self.current_page_index) - if self.return_original_image: - return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width, oimage - else: - return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width + return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width, oimage def check_should_skip(self): if self.current_pdf_index >= len(self.metadata): @@ -162,18 +164,21 @@ def check_should_skip(self): raise StopIteration def __next__(self): - fail_times = 0 - try: - self.check_should_skip() - output = self.read_data_based_on_current_state() - self.current_page_index += 1 - fail_times = 0 - except StopIteration: - raise StopIteration - except: - fail_times +=1 - if fail_times>10: - raise StopIteration + self.check_should_skip() + output = self.read_data_based_on_current_state() + self.current_page_index += 1 + # fail_times = 0 + # try: + # self.check_should_skip() + # output = self.read_data_based_on_current_state() + # self.current_page_index += 1 + # fail_times = 0 + # except StopIteration: + # raise StopIteration + # except: + # fail_times +=1 + # if fail_times>10: + # raise StopIteration return output @@ -192,22 +197,31 @@ def prepare_for_mfd_model(self, im:np.ndarray): return im[0] def custom_collate_fn(batch): - oimages = [] - images = [] + current_pdf_indexes = [] + current_page_indexes = [] + mfd_images = [] + layout_images = [] heights = [] widths = [] + oimages = [] - for oimage, image, height, width in batch: - oimages.append(oimage) - images.append(torch.tensor(image)) - heights.append(torch.tensor(height)) - widths.append(torch.tensor(width)) - - images = torch.stack(images) - heights = torch.stack(heights) - widths = torch.stack(widths) - return oimages, images, heights, widths + for current_pdf_index, current_page_index, mfd_image, layout_image, height, width, oimage in batch: + current_pdf_indexes.append(current_pdf_index) + current_page_indexes.append(current_page_index) + mfd_images.append(mfd_image) + layout_images.append(layout_image) + heights.append(height) + widths.append(width) + oimages.append(oimage) + + mfd_images = torch.stack(mfd_images) + layout_images = torch.stack(layout_images) + heights = torch.tensor(heights) + widths = torch.tensor(widths) + current_pdf_indexes = torch.tensor(current_pdf_indexes) + current_page_indexes = torch.tensor(current_page_indexes) + return current_pdf_indexes, current_page_indexes, mfd_images, layout_images, heights, widths, oimages def clean_layout_dets(layout_dets): rows = [] @@ -221,11 +235,15 @@ def 
clean_layout_dets(layout_dets): return rows from ultralytics.utils import ops import copy -def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_batch_size=4, batch_size=32,num_workers=8): +from modules.self_modify import ModifiedPaddleOCR,update_det_boxes,sorted_boxes +from utils import * + + +def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmodel=None, inner_batch_size=4, batch_size=32,num_workers=8,timer=Timers(False)): dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt)) - dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=None, num_workers=num_workers) + dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=custom_collate_fn, num_workers=num_workers) featcher = DataPrefetcher(dataloader,device='cuda') data_to_save = {} inner_batch_size = inner_batch_size @@ -233,24 +251,29 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ pdf_passed = set() batch = featcher.next() while batch is not None: - try: - pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch = batch - #for pdf_index,mfd_images_batch,images_batch,heights_batch, widths_batch in dataloader: - pdf_index = set([t.item() for t in pdf_index_batch]) - new_pdf_processed = pdf_index - pdf_passed - pdf_passed = pdf_passed|pdf_index - - for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=3,leave=False,desc="mini-Batch"): - pdf_index = pdf_index_batch[j:j+inner_batch_size] - page_ids = page_ids_batch[j:j+inner_batch_size] - mfd_images = mfd_images_batch[j:j+inner_batch_size] - images = images_batch[j:j+inner_batch_size] - heights = heights_batch[j:j+inner_batch_size] - widths = widths_batch[j:j+inner_batch_size] - + # try: + pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch,oimage_list = batch + pdf_index = set([t.item() for t in pdf_index_batch]) + new_pdf_processed = pdf_index - pdf_passed + pdf_passed = pdf_passed|pdf_index + + for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=3,leave=False,desc="mini-Batch"): + pdf_index = pdf_index_batch[j:j+inner_batch_size] + page_ids = page_ids_batch[j:j+inner_batch_size] + mfd_images = mfd_images_batch[j:j+inner_batch_size] + images = images_batch[j:j+inner_batch_size] + heights = heights_batch[j:j+inner_batch_size] + widths = widths_batch[j:j+inner_batch_size] + oimages = oimage_list[j:j+inner_batch_size] + with timer('get_layout'): layout_res = layout_model((images,heights, widths), ignore_catids=[]) + with timer('get_mfd'): mfd_res = mfd_model.predict(mfd_images, imgsz=(1888,1472), conf=0.3, iou=0.5, verbose=False) - + + with timer('combine_layout_mfd_result'): + rough_layout_this_batch =[] + ori_shape_list = [] + pdf_and_page_id_this_batch = [] for pdf_id, page_id, layout_det, mfd_det, real_input_height, real_input_width in zip(pdf_index, page_ids, layout_res, mfd_res, heights, widths): mfd_height,mfd_width = mfd_det.orig_shape pdf_id = int(pdf_id) @@ -274,10 +297,38 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_ } layout_dets.append(new_item) data_to_save[pdf_path][page_id] = layout_dets - pbar.update(len(new_pdf_processed)) - except: - print("ERROR: Fail to process batch") + ori_shape_list.append((real_input_height, real_input_width)) + 
+                pdf_and_page_id_this_batch.append((pdf_id, page_id))
+                rough_layout_this_batch.append(layout_dets)
+
+            if ocrmodel is not None:
+                with timer('text_detection/collect_for_line_detect'):
+                    canvas_tensor_this_batch, partition_per_batch,canvas_idxes_this_batch,single_page_mfdetrec_res_this_batch = collect_paragraph_image_and_its_coordinate(oimages, rough_layout_this_batch)
+                with timer('text_detection/get_line_detect_result'):
+                    dt_boxes_list = ocrmodel.batch_det_model.batch_process(canvas_tensor_this_batch,ori_shape_list=ori_shape_list)
+                with timer('text_detection/collect_for_text_images'):
+                    text_image_batch, text_image_position,text_line_bbox = collect_text_image_and_its_coordinate(single_page_mfdetrec_res_this_batch, partition_per_batch, canvas_tensor_this_batch,dt_boxes_list)
+                with timer('text_detection/get_line_text'):
+                    rec_res, elapse = ocrmodel.text_recognizer(text_image_batch)
+                for line_box, rec_result,(partition_id,text_block_id, text_line_id) in zip(text_line_bbox, rec_res,text_image_position):
+                    text, score = rec_result
+                    pdf_id, page_id = pdf_and_page_id_this_batch[partition_id]
+                    pdf_path = dataset.metadata[pdf_id]['path']
+                    p1, p2, p3, p4 = line_box.tolist()
+                    data_to_save[pdf_path][page_id].append(
+                        {
+                            'category_id': 15,
+                            'poly': p1 + p2 + p3 + p4,
+                            'score': round(score, 2),
+                            'text': text,
+                        }
+
+                    )
+        pbar.update(len(new_pdf_processed))
+        # except:
+        #     print("ERROR: Fail to process batch")
+        timer.log()
         batch = featcher.next()

     ### next, we assemble the result for each pdf and drop page_id, keeping pages in list order
@@ -316,9 +367,12 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, inner_
     layout_model = get_layout_model(model_configs)
     mfd_model = get_batch_YOLO_model(model_configs)
+    ocrmodel = None
+    ocrmodel = ocr_model = ModifiedPaddleOCR(show_log=True)
+    timer = Timers(True)
     deal_with_one_dataset("debug.jsonl",
                           "debug.stage_1.jsonl",
-                          layout_model, mfd_model, inner_batch_size=3, batch_size=3,num_workers=0)
+                          layout_model, mfd_model, ocrmodel=ocrmodel, inner_batch_size=3, batch_size=12,num_workers=4,timer=timer)
     # dataset = PDFImageDataset("part-66210c190659-000035.jsonl",layout_model.predictor.aug,layout_model.predictor.input_format,mfd_pre_transform=None)
     # dataloader = DataLoader(dataset, batch_size=8,collate_fn=custom_collate_fn)

From e727dfa860ebd3b3875380a825b131144ed8e54a Mon Sep 17 00:00:00 2001
From: zhangtianning
Date: Tue, 30 Jul 2024 07:45:04 +0000
Subject: [PATCH 12/74] add pytorchocr

---
 configs/cls/cls_mv3.yml                       |   96 +
 configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det.yml   |  163 +
 .../det/ch_PP-OCRv4/ch_PP-OCRv4_det_cml.yml   |  235 +
 .../ch_PP-OCRv4/ch_PP-OCRv4_det_student.yml   |  171 +
 .../ch_PP-OCRv4/ch_PP-OCRv4_det_teacher.yml   |  172 +
 .../det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml  |  134 +
 .../ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml    |  133 +
 configs/det/det_mv3_db.yml                    |  133 +
 configs/det/det_mv3_east.yml                  |  111 +
 configs/det/det_mv3_pse.yml                   |  135 +
 configs/det/det_ppocr_v3.yml                  |  163 +
 configs/det/det_r50_db++_icdar15.yml          |  163 +
 configs/det/det_r50_db++_td_tr.yml            |  166 +
 configs/det/det_r50_vd_db.yml                 |  130 +
 configs/det/det_r50_vd_dcn_fce_ctw.yml        |  139 +
 configs/det/det_r50_vd_east.yml               |  110 +
 configs/det/det_r50_vd_pse.yml                |  134 +
 configs/det/det_r50_vd_sast_icdar15.yml       |  110 +
 configs/det/det_r50_vd_sast_totaltext.yml     |  110 +
 configs/e2e/e2e_r50_vd_pg.yml                 |  114 +
 configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml      |  126 +
 .../PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml |  205 +
 configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml      |  126 +
configs/rec/PP-OCRv3/multi_language/.gitkeep | 0 .../multi_language/arabic_PP-OCRv3_rec.yml | 126 + .../chinese_cht_PP-OCRv3_rec.yml | 126 + .../multi_language/cyrillic_PP-OCRv3_rec.yml | 126 + .../devanagari_PP-OCRv3_rec.yml | 126 + .../multi_language/japan_PP-OCRv3_rec.yml | 126 + .../multi_language/ka_PP-OCRv3_rec.yml | 126 + .../multi_language/korean_PP-OCRv3_rec.yml | 126 + .../multi_language/latin_PP-OCRv3_rec.yml | 126 + .../multi_language/ta_PP-OCRv3_rec.yml | 126 + .../multi_language/te_PP-OCRv3_rec.yml | 126 + configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml | 138 + .../PP-OCRv4/ch_PP-OCRv4_rec_ampO2_ultra.yml | 140 + configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml | 132 + .../rec/PP-OCRv4/ch_PP-OCRv4_rec_distill.yml | 231 + .../PP-OCRv4/ch_PP-OCRv4_rec_fp32_ultra.yml | 138 + .../rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet.yml | 137 + .../ch_PP-OCRv4_rec_hgnet_ampO2_ultra.yml | 139 + .../ch_PP-OCRv4_rec_hgnet_fp32_ultra.yml | 137 + configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml | 150 + .../rec_chinese_common_train_v2.0.yml | 100 + .../rec_chinese_lite_train_v2.0.yml | 102 + .../generate_multi_language_configs.py | 200 + .../multi_language/rec_arabic_lite_train.yml | 111 + .../rec_cyrillic_lite_train.yml | 111 + .../rec_devanagari_lite_train.yml | 111 + .../rec_en_number_lite_train.yml | 102 + .../multi_language/rec_french_lite_train.yml | 102 + .../multi_language/rec_german_lite_train.yml | 102 + .../multi_language/rec_japan_lite_train.yml | 102 + .../multi_language/rec_korean_lite_train.yml | 102 + .../multi_language/rec_latin_lite_train.yml | 111 + .../rec_multi_language_lite_train.yml | 103 + configs/rec/rec_d28_can.yml | 122 + configs/rec/rec_efficientb3_fpn_pren.yml | 92 + configs/rec/rec_icdar15_train.yml | 100 + configs/rec/rec_mtb_nrtr.yml | 101 + configs/rec/rec_mv3_none_bilstm_ctc.yml | 96 + configs/rec/rec_mv3_none_none_ctc.yml | 95 + configs/rec/rec_mv3_tps_bilstm_att.yml | 102 + configs/rec/rec_mv3_tps_bilstm_ctc.yml | 100 + configs/rec/rec_r31_sar.yml | 98 + configs/rec/rec_r34_vd_none_bilstm_ctc.yml | 95 + configs/rec/rec_r34_vd_none_none_ctc.yml | 93 + configs/rec/rec_r34_vd_tps_bilstm_att.yml | 101 + configs/rec/rec_r34_vd_tps_bilstm_ctc.yml | 99 + configs/rec/rec_r50_fpn_srn.yml | 107 + .../rec_svtr_base_8local_10global_stn_en.yml | 117 + ...rec_svtr_large_10local_11global_stn_ch.yml | 113 + ...rec_svtr_large_10local_11global_stn_en.yml | 117 + .../rec_svtr_small_8local_7global_stn_ch.yml | 114 + .../rec_svtr_small_8local_7global_stn_en.yml | 117 + .../rec_svtr_tiny_6local_6global_stn_ch.yml | 114 + .../rec_svtr_tiny_6local_6global_stn_en.yml | 117 + configs/rec/rec_svtrnet.yml | 117 + configs/rec/rec_vitstr_none_ce.yml | 102 + configs/sr/sr_telescope.yml | 84 + configs/sr/sr_tsrn_transformer_strock.yml | 85 + configs/table/table_mv3.yml | 116 + configs/table/table_mv3_det.yml | 15 + configs/table/table_mv3_rec.yml | 14 + configs/table/table_mv3_table_structure.yml | 15 + modules/batch_text_detector.py | 363 + modules/pytorchocr/__init__.py | 0 modules/pytorchocr/base_ocr_v20.py | 112 + modules/pytorchocr/data/__init__.py | 24 + modules/pytorchocr/data/imaug/__init__.py | 48 + .../pytorchocr/data/imaug/gen_table_mask.py | 245 + modules/pytorchocr/data/imaug/operators.py | 418 + modules/pytorchocr/modeling/__init__.py | 0 .../modeling/architectures/__init__.py | 25 + .../modeling/architectures/base_model.py | 125 + .../pytorchocr/modeling/backbones/__init__.py | 56 + .../modeling/backbones/det_mobilenet_v3.py | 256 + .../modeling/backbones/det_resnet.py | 210 + 
.../modeling/backbones/det_resnet_vd.py | 360 + .../modeling/backbones/det_resnet_vd_sast.py | 279 + .../modeling/backbones/e2e_resnet_vd_pg.py | 247 + .../modeling/backbones/rec_densenet.py | 133 + .../modeling/backbones/rec_hgnet.py | 324 + .../modeling/backbones/rec_lcnetv3.py | 474 + .../modeling/backbones/rec_lcnetv3_bak.py | 444 + .../modeling/backbones/rec_mobilenet_v3.py | 125 + .../modeling/backbones/rec_mv1_enhance.py | 233 + .../modeling/backbones/rec_nrtr_mtb.py | 36 + .../modeling/backbones/rec_resnet_31.py | 200 + .../modeling/backbones/rec_resnet_fpn.py | 278 + .../modeling/backbones/rec_resnet_vd.py | 260 + .../modeling/backbones/rec_svtrnet.py | 582 ++ .../modeling/backbones/rec_vitstr.py | 120 + .../modeling/backbones/table_mobilenet_v3.py | 270 + .../modeling/backbones/table_resnet_vd.py | 269 + modules/pytorchocr/modeling/common.py | 73 + modules/pytorchocr/modeling/heads/__init__.py | 51 + modules/pytorchocr/modeling/heads/cls_head.py | 28 + .../pytorchocr/modeling/heads/det_db_head.py | 121 + .../modeling/heads/det_east_head.py | 112 + .../pytorchocr/modeling/heads/det_fce_head.py | 82 + .../pytorchocr/modeling/heads/det_pse_head.py | 25 + .../modeling/heads/det_sast_head.py | 118 + .../pytorchocr/modeling/heads/e2e_pg_head.py | 234 + .../modeling/heads/multiheadAttention.py | 150 + .../pytorchocr/modeling/heads/rec_att_head.py | 190 + .../pytorchocr/modeling/heads/rec_can_head.py | 288 + .../pytorchocr/modeling/heads/rec_ctc_head.py | 53 + .../modeling/heads/rec_multi_head.py | 88 + .../modeling/heads/rec_nrtr_head.py | 812 ++ .../pytorchocr/modeling/heads/rec_sar_head.py | 403 + .../pytorchocr/modeling/heads/rec_srn_head.py | 271 + .../modeling/heads/self_attention.py | 419 + .../modeling/heads/sr_rensnet_transformer.py | 419 + .../modeling/heads/table_att_head.py | 207 + modules/pytorchocr/modeling/necks/__init__.py | 33 + modules/pytorchocr/modeling/necks/db_fpn.py | 414 + modules/pytorchocr/modeling/necks/east_fpn.py | 184 + modules/pytorchocr/modeling/necks/fce_fpn.py | 323 + modules/pytorchocr/modeling/necks/fpn.py | 109 + modules/pytorchocr/modeling/necks/intracl.py | 114 + modules/pytorchocr/modeling/necks/pg_fpn.py | 297 + modules/pytorchocr/modeling/necks/rnn.py | 205 + modules/pytorchocr/modeling/necks/sast_fpn.py | 305 + .../pytorchocr/modeling/necks/table_fpn.py | 88 + .../modeling/transforms/__init__.py | 30 + modules/pytorchocr/modeling/transforms/stn.py | 121 + .../pytorchocr/modeling/transforms/tbsrn.py | 267 + modules/pytorchocr/modeling/transforms/tps.py | 301 + .../transforms/tps_spatial_transformer.py | 136 + .../pytorchocr/modeling/transforms/tsrn.py | 220 + modules/pytorchocr/postprocess/__init__.py | 41 + .../pytorchocr/postprocess/cls_postprocess.py | 20 + .../pytorchocr/postprocess/db_postprocess.py | 179 + .../postprocess/east_postprocess.py | 144 + .../pytorchocr/postprocess/fce_postprocess.py | 228 + .../postprocess/locality_aware_nms.py | 199 + .../pytorchocr/postprocess/pg_postprocess.py | 52 + .../postprocess/pse_postprocess/__init__.py | 15 + .../postprocess/pse_postprocess/pse/README.md | 6 + .../pse_postprocess/pse/__init__.py | 29 + .../postprocess/pse_postprocess/pse/pse.pyx | 70 + .../postprocess/pse_postprocess/pse/setup.py | 14 + .../pse_postprocess/pse_postprocess.py | 105 + .../pytorchocr/postprocess/rec_postprocess.py | 693 ++ .../postprocess/sast_postprocess.py | 302 + modules/pytorchocr/pytorchocr_utility.py | 555 ++ modules/pytorchocr/utils/EN_symbol_dict.txt | 94 + modules/pytorchocr/utils/__init__.py | 0 
modules/pytorchocr/utils/dict/ar_dict.txt | 117 + modules/pytorchocr/utils/dict/arabic_dict.txt | 162 + modules/pytorchocr/utils/dict/be_dict.txt | 145 + modules/pytorchocr/utils/dict/bg_dict.txt | 140 + modules/pytorchocr/utils/dict/ch_tra_dict.txt | 8421 +++++++++++++++++ .../utils/dict/chinese_cht_dict.txt | 8421 +++++++++++++++++ .../pytorchocr/utils/dict/cyrillic_dict.txt | 163 + .../pytorchocr/utils/dict/devanagari_dict.txt | 167 + modules/pytorchocr/utils/dict/en_dict.txt | 63 + modules/pytorchocr/utils/dict/es_dict.txt | 110 + modules/pytorchocr/utils/dict/fa_dict.txt | 136 + modules/pytorchocr/utils/dict/french_dict.txt | 136 + modules/pytorchocr/utils/dict/german_dict.txt | 143 + modules/pytorchocr/utils/dict/hi_dict.txt | 162 + modules/pytorchocr/utils/dict/it_dict.txt | 118 + modules/pytorchocr/utils/dict/japan_dict.txt | 4399 +++++++++ modules/pytorchocr/utils/dict/ka_dict.txt | 153 + modules/pytorchocr/utils/dict/kn_dict.txt | 153 + modules/pytorchocr/utils/dict/korean_dict.txt | 3688 ++++++++ .../utils/dict/latex_symbol_dict.txt | 111 + modules/pytorchocr/utils/dict/latin_dict.txt | 185 + modules/pytorchocr/utils/dict/mr_dict.txt | 153 + modules/pytorchocr/utils/dict/ne_dict.txt | 153 + modules/pytorchocr/utils/dict/oc_dict.txt | 96 + modules/pytorchocr/utils/dict/pt_dict.txt | 130 + modules/pytorchocr/utils/dict/pu_dict.txt | 130 + .../utils/dict/rs_cyrillic_dict.txt | 134 + modules/pytorchocr/utils/dict/rs_dict.txt | 91 + .../pytorchocr/utils/dict/rs_latin_dict.txt | 91 + modules/pytorchocr/utils/dict/rsc_dict.txt | 134 + modules/pytorchocr/utils/dict/ru_dict.txt | 125 + modules/pytorchocr/utils/dict/ta_dict.txt | 128 + modules/pytorchocr/utils/dict/table_dict.txt | 277 + .../utils/dict/table_structure_dict.txt | 2759 ++++++ modules/pytorchocr/utils/dict/te_dict.txt | 151 + modules/pytorchocr/utils/dict/ug_dict.txt | 114 + modules/pytorchocr/utils/dict/uk_dict.txt | 142 + modules/pytorchocr/utils/dict/ur_dict.txt | 137 + modules/pytorchocr/utils/dict/xi_dict.txt | 110 + modules/pytorchocr/utils/dict90.txt | 90 + .../utils/e2e_utils/extract_batchsize.py | 88 + .../utils/e2e_utils/extract_textpoint_fast.py | 457 + .../utils/e2e_utils/extract_textpoint_slow.py | 592 ++ .../utils/e2e_utils/pgnet_pp_utils.py | 150 + modules/pytorchocr/utils/e2e_utils/visual.py | 162 + modules/pytorchocr/utils/en_dict.txt | 95 + modules/pytorchocr/utils/ic15_dict.txt | 36 + modules/pytorchocr/utils/logging.py | 52 + modules/pytorchocr/utils/poly_nms.py | 146 + modules/pytorchocr/utils/ppocr_keys_v1.txt | 6623 +++++++++++++ modules/pytorchocr/utils/utility.py | 70 + rough_layout.py | 5 +- 221 files changed, 67564 insertions(+), 1 deletion(-) create mode 100644 configs/cls/cls_mv3.yml create mode 100644 configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det.yml create mode 100644 configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_cml.yml create mode 100644 configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_student.yml create mode 100644 configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_teacher.yml create mode 100644 configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml create mode 100644 configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml create mode 100644 configs/det/det_mv3_db.yml create mode 100644 configs/det/det_mv3_east.yml create mode 100644 configs/det/det_mv3_pse.yml create mode 100644 configs/det/det_ppocr_v3.yml create mode 100644 configs/det/det_r50_db++_icdar15.yml create mode 100644 configs/det/det_r50_db++_td_tr.yml create mode 100644 configs/det/det_r50_vd_db.yml create mode 100644 configs/det/det_r50_vd_dcn_fce_ctw.yml create mode 
100644 configs/det/det_r50_vd_east.yml create mode 100644 configs/det/det_r50_vd_pse.yml create mode 100644 configs/det/det_r50_vd_sast_icdar15.yml create mode 100644 configs/det/det_r50_vd_sast_totaltext.yml create mode 100644 configs/e2e/e2e_r50_vd_pg.yml create mode 100644 configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml create mode 100644 configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/.gitkeep create mode 100644 configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/chinese_cht_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/japan_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/korean_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv3/multi_language/te_PP-OCRv3_rec.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ampO2_ultra.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_distill.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_fp32_ultra.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_ampO2_ultra.yml create mode 100644 configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_fp32_ultra.yml create mode 100644 configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml create mode 100644 configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml create mode 100644 configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml create mode 100644 configs/rec/multi_language/generate_multi_language_configs.py create mode 100644 configs/rec/multi_language/rec_arabic_lite_train.yml create mode 100644 configs/rec/multi_language/rec_cyrillic_lite_train.yml create mode 100644 configs/rec/multi_language/rec_devanagari_lite_train.yml create mode 100644 configs/rec/multi_language/rec_en_number_lite_train.yml create mode 100644 configs/rec/multi_language/rec_french_lite_train.yml create mode 100644 configs/rec/multi_language/rec_german_lite_train.yml create mode 100644 configs/rec/multi_language/rec_japan_lite_train.yml create mode 100644 configs/rec/multi_language/rec_korean_lite_train.yml create mode 100644 configs/rec/multi_language/rec_latin_lite_train.yml create mode 100644 configs/rec/multi_language/rec_multi_language_lite_train.yml create mode 100644 configs/rec/rec_d28_can.yml create mode 100644 configs/rec/rec_efficientb3_fpn_pren.yml create mode 100644 configs/rec/rec_icdar15_train.yml create mode 100644 configs/rec/rec_mtb_nrtr.yml create mode 100644 configs/rec/rec_mv3_none_bilstm_ctc.yml create mode 100644 configs/rec/rec_mv3_none_none_ctc.yml create mode 100644 configs/rec/rec_mv3_tps_bilstm_att.yml create mode 100644 configs/rec/rec_mv3_tps_bilstm_ctc.yml create mode 100644 configs/rec/rec_r31_sar.yml create mode 100644 configs/rec/rec_r34_vd_none_bilstm_ctc.yml create mode 100644 configs/rec/rec_r34_vd_none_none_ctc.yml create mode 100644 
configs/rec/rec_r34_vd_tps_bilstm_att.yml create mode 100644 configs/rec/rec_r34_vd_tps_bilstm_ctc.yml create mode 100644 configs/rec/rec_r50_fpn_srn.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_base_8local_10global_stn_en.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_ch.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_en.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_ch.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_en.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_ch.yml create mode 100644 configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_en.yml create mode 100644 configs/rec/rec_svtrnet.yml create mode 100644 configs/rec/rec_vitstr_none_ce.yml create mode 100644 configs/sr/sr_telescope.yml create mode 100644 configs/sr/sr_tsrn_transformer_strock.yml create mode 100644 configs/table/table_mv3.yml create mode 100644 configs/table/table_mv3_det.yml create mode 100644 configs/table/table_mv3_rec.yml create mode 100644 configs/table/table_mv3_table_structure.yml create mode 100644 modules/batch_text_detector.py create mode 100644 modules/pytorchocr/__init__.py create mode 100644 modules/pytorchocr/base_ocr_v20.py create mode 100644 modules/pytorchocr/data/__init__.py create mode 100644 modules/pytorchocr/data/imaug/__init__.py create mode 100644 modules/pytorchocr/data/imaug/gen_table_mask.py create mode 100644 modules/pytorchocr/data/imaug/operators.py create mode 100644 modules/pytorchocr/modeling/__init__.py create mode 100644 modules/pytorchocr/modeling/architectures/__init__.py create mode 100644 modules/pytorchocr/modeling/architectures/base_model.py create mode 100644 modules/pytorchocr/modeling/backbones/__init__.py create mode 100644 modules/pytorchocr/modeling/backbones/det_mobilenet_v3.py create mode 100644 modules/pytorchocr/modeling/backbones/det_resnet.py create mode 100644 modules/pytorchocr/modeling/backbones/det_resnet_vd.py create mode 100644 modules/pytorchocr/modeling/backbones/det_resnet_vd_sast.py create mode 100644 modules/pytorchocr/modeling/backbones/e2e_resnet_vd_pg.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_densenet.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_hgnet.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_lcnetv3.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_lcnetv3_bak.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_mobilenet_v3.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_mv1_enhance.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_nrtr_mtb.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_resnet_31.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_resnet_fpn.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_resnet_vd.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_svtrnet.py create mode 100644 modules/pytorchocr/modeling/backbones/rec_vitstr.py create mode 100644 modules/pytorchocr/modeling/backbones/table_mobilenet_v3.py create mode 100644 modules/pytorchocr/modeling/backbones/table_resnet_vd.py create mode 100644 modules/pytorchocr/modeling/common.py create mode 100644 modules/pytorchocr/modeling/heads/__init__.py create mode 100644 modules/pytorchocr/modeling/heads/cls_head.py create mode 100644 modules/pytorchocr/modeling/heads/det_db_head.py create mode 100644 
modules/pytorchocr/modeling/heads/det_east_head.py create mode 100644 modules/pytorchocr/modeling/heads/det_fce_head.py create mode 100644 modules/pytorchocr/modeling/heads/det_pse_head.py create mode 100644 modules/pytorchocr/modeling/heads/det_sast_head.py create mode 100644 modules/pytorchocr/modeling/heads/e2e_pg_head.py create mode 100644 modules/pytorchocr/modeling/heads/multiheadAttention.py create mode 100644 modules/pytorchocr/modeling/heads/rec_att_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_can_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_ctc_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_multi_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_nrtr_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_sar_head.py create mode 100644 modules/pytorchocr/modeling/heads/rec_srn_head.py create mode 100644 modules/pytorchocr/modeling/heads/self_attention.py create mode 100644 modules/pytorchocr/modeling/heads/sr_rensnet_transformer.py create mode 100644 modules/pytorchocr/modeling/heads/table_att_head.py create mode 100644 modules/pytorchocr/modeling/necks/__init__.py create mode 100644 modules/pytorchocr/modeling/necks/db_fpn.py create mode 100644 modules/pytorchocr/modeling/necks/east_fpn.py create mode 100644 modules/pytorchocr/modeling/necks/fce_fpn.py create mode 100644 modules/pytorchocr/modeling/necks/fpn.py create mode 100644 modules/pytorchocr/modeling/necks/intracl.py create mode 100644 modules/pytorchocr/modeling/necks/pg_fpn.py create mode 100644 modules/pytorchocr/modeling/necks/rnn.py create mode 100644 modules/pytorchocr/modeling/necks/sast_fpn.py create mode 100644 modules/pytorchocr/modeling/necks/table_fpn.py create mode 100644 modules/pytorchocr/modeling/transforms/__init__.py create mode 100644 modules/pytorchocr/modeling/transforms/stn.py create mode 100644 modules/pytorchocr/modeling/transforms/tbsrn.py create mode 100644 modules/pytorchocr/modeling/transforms/tps.py create mode 100644 modules/pytorchocr/modeling/transforms/tps_spatial_transformer.py create mode 100644 modules/pytorchocr/modeling/transforms/tsrn.py create mode 100644 modules/pytorchocr/postprocess/__init__.py create mode 100644 modules/pytorchocr/postprocess/cls_postprocess.py create mode 100644 modules/pytorchocr/postprocess/db_postprocess.py create mode 100644 modules/pytorchocr/postprocess/east_postprocess.py create mode 100644 modules/pytorchocr/postprocess/fce_postprocess.py create mode 100644 modules/pytorchocr/postprocess/locality_aware_nms.py create mode 100644 modules/pytorchocr/postprocess/pg_postprocess.py create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/__init__.py create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/pse/README.md create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/pse/__init__.py create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/pse/pse.pyx create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/pse/setup.py create mode 100644 modules/pytorchocr/postprocess/pse_postprocess/pse_postprocess.py create mode 100644 modules/pytorchocr/postprocess/rec_postprocess.py create mode 100644 modules/pytorchocr/postprocess/sast_postprocess.py create mode 100644 modules/pytorchocr/pytorchocr_utility.py create mode 100644 modules/pytorchocr/utils/EN_symbol_dict.txt create mode 100644 modules/pytorchocr/utils/__init__.py create mode 100644 modules/pytorchocr/utils/dict/ar_dict.txt create mode 100644 
modules/pytorchocr/utils/dict/arabic_dict.txt create mode 100644 modules/pytorchocr/utils/dict/be_dict.txt create mode 100644 modules/pytorchocr/utils/dict/bg_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ch_tra_dict.txt create mode 100644 modules/pytorchocr/utils/dict/chinese_cht_dict.txt create mode 100644 modules/pytorchocr/utils/dict/cyrillic_dict.txt create mode 100644 modules/pytorchocr/utils/dict/devanagari_dict.txt create mode 100644 modules/pytorchocr/utils/dict/en_dict.txt create mode 100644 modules/pytorchocr/utils/dict/es_dict.txt create mode 100644 modules/pytorchocr/utils/dict/fa_dict.txt create mode 100644 modules/pytorchocr/utils/dict/french_dict.txt create mode 100644 modules/pytorchocr/utils/dict/german_dict.txt create mode 100644 modules/pytorchocr/utils/dict/hi_dict.txt create mode 100644 modules/pytorchocr/utils/dict/it_dict.txt create mode 100644 modules/pytorchocr/utils/dict/japan_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ka_dict.txt create mode 100644 modules/pytorchocr/utils/dict/kn_dict.txt create mode 100644 modules/pytorchocr/utils/dict/korean_dict.txt create mode 100644 modules/pytorchocr/utils/dict/latex_symbol_dict.txt create mode 100644 modules/pytorchocr/utils/dict/latin_dict.txt create mode 100644 modules/pytorchocr/utils/dict/mr_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ne_dict.txt create mode 100644 modules/pytorchocr/utils/dict/oc_dict.txt create mode 100644 modules/pytorchocr/utils/dict/pt_dict.txt create mode 100644 modules/pytorchocr/utils/dict/pu_dict.txt create mode 100644 modules/pytorchocr/utils/dict/rs_cyrillic_dict.txt create mode 100644 modules/pytorchocr/utils/dict/rs_dict.txt create mode 100644 modules/pytorchocr/utils/dict/rs_latin_dict.txt create mode 100644 modules/pytorchocr/utils/dict/rsc_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ru_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ta_dict.txt create mode 100644 modules/pytorchocr/utils/dict/table_dict.txt create mode 100644 modules/pytorchocr/utils/dict/table_structure_dict.txt create mode 100644 modules/pytorchocr/utils/dict/te_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ug_dict.txt create mode 100644 modules/pytorchocr/utils/dict/uk_dict.txt create mode 100644 modules/pytorchocr/utils/dict/ur_dict.txt create mode 100644 modules/pytorchocr/utils/dict/xi_dict.txt create mode 100644 modules/pytorchocr/utils/dict90.txt create mode 100644 modules/pytorchocr/utils/e2e_utils/extract_batchsize.py create mode 100644 modules/pytorchocr/utils/e2e_utils/extract_textpoint_fast.py create mode 100644 modules/pytorchocr/utils/e2e_utils/extract_textpoint_slow.py create mode 100644 modules/pytorchocr/utils/e2e_utils/pgnet_pp_utils.py create mode 100644 modules/pytorchocr/utils/e2e_utils/visual.py create mode 100644 modules/pytorchocr/utils/en_dict.txt create mode 100644 modules/pytorchocr/utils/ic15_dict.txt create mode 100644 modules/pytorchocr/utils/logging.py create mode 100644 modules/pytorchocr/utils/poly_nms.py create mode 100644 modules/pytorchocr/utils/ppocr_keys_v1.txt create mode 100644 modules/pytorchocr/utils/utility.py diff --git a/configs/cls/cls_mv3.yml b/configs/cls/cls_mv3.yml new file mode 100644 index 0000000..b165bc4 --- /dev/null +++ b/configs/cls/cls_mv3.yml @@ -0,0 +1,96 @@ +Global: + use_gpu: true + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/cls/mv3/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + 
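# note: eval_batch_step is [start_iter, eval_interval]; with the [0, 1000] below,
+ # evaluation runs every 1000 iterations from the start (the comment above
+ # describes the upstream PaddleOCR template default, not this config)
+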
eval_batch_step: [0, 1000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + label_list: ['0','180'] + +Architecture: + model_type: cls + algorithm: CLS + Transform: + Backbone: + name: MobileNetV3 + scale: 0.35 + model_name: small + Neck: + Head: + name: ClsHead + class_dim: 2 + +Loss: + name: ClsLoss + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: ClsPostProcess + +Metric: + name: ClsMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/cls + label_file_list: + - ./train_data/cls/train.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ClsLabelEncode: # Class handling label + - RecAug: + use_tia: False + - RandAugment: + - ClsResizeImg: + image_shape: [3, 48, 192] + - KeepKeys: + keep_keys: ['image', 'label'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/cls + label_file_list: + - ./train_data/cls/test.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ClsLabelEncode: # Class handling label + - ClsResizeImg: + image_shape: [3, 48, 192] + - KeepKeys: + keep_keys: ['image', 'label'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 512 + num_workers: 4 \ No newline at end of file diff --git a/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det.yml b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det.yml new file mode 100644 index 0000000..0e8af77 --- /dev/null +++ b/configs/det/ch_PP-OCRv3/ch_PP-OCRv3_det.yml @@ -0,0 +1,163 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/ch_PP-OCR_V3_det/ + save_epoch_step: 100 + eval_batch_step: + - 0 + - 400 + cal_metric_during_train: false + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: True + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DetMetric + main_indicator: hmean +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - 
EastRandomCropData: + size: + - 960 + - 960 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: null + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 diff --git a/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_cml.yml b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_cml.yml new file mode 100644 index 0000000..fe582ba --- /dev/null +++ b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_cml.yml @@ -0,0 +1,235 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 20 + save_model_dir: ./output/ch_PP-OCRv4 + save_epoch_step: 50 + eval_batch_step: + - 0 + - 1000 + cal_metric_during_train: true + checkpoints: null + pretrained_model: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true +Architecture: + name: DistillationModel + algorithm: Distillation + model_type: det + Models: + Student: + model_type: det + algorithm: DB + Transform: null + Backbone: + name: PPLCNetNew + scale: 0.75 + pretrained: false + Neck: + name: RSEFPN + out_channels: 96 + shortcut: true + Head: + name: DBHead + k: 50 + Student2: + pretrained: null + model_type: det + algorithm: DB + Transform: null + Backbone: + name: PPLCNetNew + scale: 0.75 + pretrained: true + Neck: + name: RSEFPN + out_channels: 96 + shortcut: true + Head: + name: DBHead + k: 50 + Teacher: + pretrained: https://paddleocr.bj.bcebos.com/PP-OCRv4/chinese/ch_PP-OCRv4_det_cml_teacher_pretrained/teacher.pdparams + freeze_params: true + return_all_feats: false + model_type: det + algorithm: DB + Backbone: + name: ResNet_vd + in_channels: 3 + layers: 50 + Neck: + name: LKPAN + out_channels: 256 + Head: + name: DBHead + kernel_list: + - 7 + - 2 + - 2 + k: 50 +Loss: + name: CombinedLoss + loss_config_list: + - DistillationDilaDBLoss: + weight: 1.0 + model_name_pairs: + - - Student + - Teacher + - - Student2 + - Teacher + key: maps + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + - DistillationDMLLoss: + model_name_pairs: + - Student + - Student2 + maps_name: thrink_maps + weight: 1.0 + key: maps + - DistillationDBLoss: + weight: 1.0 + model_name_list: + - Student + - Student2 + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 +PostProcess: + name: DistillationDBPostProcess + model_name: + - Student + 
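# post-process only the Student branch at inference; `key` selects the
+ # detection head output from the distillation result dict
+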
key: head_out + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DistillationMetric + base_metric_name: DetMetric + main_indicator: hmean + key: Student +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 640 + - 640 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + total_epoch: 500 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + total_epoch: 500 + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 16 + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: null + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 +profiler_options: null diff --git a/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_student.yml b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_student.yml new file mode 100644 index 0000000..39b260c --- /dev/null +++ b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_student.yml @@ -0,0 +1,171 @@ +Global: + debug: false + use_gpu: true + epoch_num: &epoch_num 500 + log_smooth_window: 20 + print_batch_step: 100 + save_model_dir: ./output/ch_PP-OCRv4 + save_epoch_step: 10 + eval_batch_step: + - 0 + - 1500 + cal_metric_during_train: false + checkpoints: + pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPLCNetV3_x0_75_ocr_det.pdparams + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + model_type: det + algorithm: DB + Transform: null + Backbone: + name: PPLCNetV3 + scale: 0.75 + det: True + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 #(8*8c) + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + 
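# DecodeImage loads with OpenCV, so images stay in BGR channel order
+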
img_mode: BGR + channel_first: false + - DetLabelEncode: null + - CopyPaste: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 640 + - 640 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + total_epoch: *epoch_num + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + total_epoch: *epoch_num + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 +profiler_options: null diff --git a/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_teacher.yml b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_teacher.yml new file mode 100644 index 0000000..b58af1c --- /dev/null +++ b/configs/det/ch_PP-OCRv4/ch_PP-OCRv4_det_teacher.yml @@ -0,0 +1,172 @@ +Global: + debug: false + use_gpu: true + epoch_num: &epoch_num 500 + log_smooth_window: 20 + print_batch_step: 100 + save_model_dir: ./output/ch_PP-OCRv4 + save_epoch_step: 10 + eval_batch_step: + - 0 + - 1500 + cal_metric_during_train: false + checkpoints: + pretrained_model: https://paddleocr.bj.bcebos.com/pretrained/PPHGNet_small_ocr_det.pdparams + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + model_type: det + algorithm: DB + Transform: null + Backbone: + name: PPHGNet_small + det: True + Neck: + name: LKPAN + out_channels: 256 + intracl: true + Head: + name: PFHeadLocal + k: 50 + mode: "large" + + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 #(8*8c) + warmup_epoch: 2 + regularizer: + name: L2 + factor: 1e-6 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - CopyPaste: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 640 + - 640 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + 
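# DB border-map (threshold-map) targets are normalized into [thresh_min, thresh_max]
+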
thresh_max: 0.7 + total_epoch: *epoch_num + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + total_epoch: *epoch_num + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 +profiler_options: null diff --git a/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml new file mode 100644 index 0000000..fd88495 --- /dev/null +++ b/configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml @@ -0,0 +1,134 @@ +Global: + use_gpu: true + epoch_num: 1200 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/ch_db_mv3/ + save_epoch_step: 1200 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [3000, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_db/predicts_db.txt + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: True + Neck: + name: DBFPN + out_channels: 96 + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - IaaAugment: + augmenter_args: + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [-10, 10] } } + - { 'type': Resize, 'args': { 'size': [0.5, 3] } } + - EastRandomCropData: + size: [960, 960] + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. 
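+        # NormalizeImage computes (img * scale - mean) / std, i.e. ImageNet
+        # statistics applied after rescaling pixels to [0, 1]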
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: +# image_shape: [736, 1280] + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 diff --git a/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml new file mode 100644 index 0000000..52d6d4f --- /dev/null +++ b/configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml @@ -0,0 +1,133 @@ +Global: + use_gpu: true + epoch_num: 1200 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/ch_db_res18/ + save_epoch_step: 1200 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [3000, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet18_vd_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_db/predicts_db.txt + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: ResNet_vd + layers: 18 + disable_se: True + Neck: + name: DBFPN + out_channels: 256 + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - IaaAugment: + augmenter_args: + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [-10, 10] } } + - { 'type': Resize, 'args': { 'size': [0.5, 3] } } + - EastRandomCropData: + size: [960, 960] + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: +# image_shape: [736, 1280] + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 diff --git a/configs/det/det_mv3_db.yml b/configs/det/det_mv3_db.yml new file mode 100644 index 0000000..00a16b5 --- /dev/null +++ b/configs/det/det_mv3_db.yml @@ -0,0 +1,133 @@ +Global: + use_gpu: true + epoch_num: 1200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/db_mv3/ + save_epoch_step: 1200 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_db/predicts_db.txt + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: DBFPN + out_channels: 256 + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - IaaAugment: + augmenter_args: + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [-10, 10] } } + - { 'type': Resize, 'args': { 'size': [0.5, 3] } } + - EastRandomCropData: + size: [640, 640] + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 16 + num_workers: 8 + use_shared_memory: False + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + image_shape: [736, 1280] + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 + use_shared_memory: False \ No newline at end of file diff --git a/configs/det/det_mv3_east.yml b/configs/det/det_mv3_east.yml new file mode 100644 index 0000000..05581a7 --- /dev/null +++ b/configs/det/det_mv3_east.yml @@ -0,0 +1,111 @@ +Global: + use_gpu: true + epoch_num: 10000 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/east_mv3/ + save_epoch_step: 1000 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [4000, 5000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + save_res_path: ./output/det_east/predicts_east.txt + +Architecture: + model_type: det + algorithm: EAST + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: EASTFPN + model_name: small + Head: + name: EASTHead + model_name: small + +Loss: + name: EASTLoss + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + # name: Cosine + learning_rate: 0.001 + # warmup_epoch: 0 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: EASTPostProcess + score_thresh: 0.8 + cover_thresh: 0.1 + nms_thresh: 0.2 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - EASTProcessTrain: + image_shape: [512, 512] + background_ratio: 0.125 + min_crop_side_ratio: 0.1 + min_text_size: 10 + - KeepKeys: + keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 16 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 2400 + limit_type: max + - NormalizeImage: + scale: 1./255. 
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/det/det_mv3_pse.yml b/configs/det/det_mv3_pse.yml new file mode 100644 index 0000000..61ac247 --- /dev/null +++ b/configs/det/det_mv3_pse.yml @@ -0,0 +1,135 @@ +Global: + use_gpu: true + epoch_num: 600 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_mv3_pse/ + save_epoch_step: 600 + # evaluation is run every 63 iterations + eval_batch_step: [ 0,63 ] + cal_metric_during_train: False + pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained + checkpoints: #./output/det_r50_vd_pse_batch8_ColorJitter/best_accuracy + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_pse/predicts_pse.txt + +Architecture: + model_type: det + algorithm: PSE + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: FPN + out_channels: 96 + Head: + name: PSEHead + hidden_dim: 96 + out_channels: 7 + +Loss: + name: PSELoss + alpha: 0.7 + ohem_ratio: 3 + kernel_sample_mask: pred + reduction: none + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Step + learning_rate: 0.001 + step_size: 200 + gamma: 0.1 + regularizer: + name: 'L2' + factor: 0.0005 + +PostProcess: + name: PSEPostProcess + thresh: 0 + box_thresh: 0.85 + min_area: 16 + box_type: box # 'box' or 'poly' + scale: 1 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - ColorJitter: + brightness: 0.12549019607843137 + saturation: 0.5 + - IaaAugment: + augmenter_args: + - { 'type': Resize, 'args': { 'size': [ 0.5, 3 ] } } + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [ -10, 10 ] } } + - MakePseGt: + kernel_num: 7 + min_shrink_ratio: 0.4 + size: 640 + - RandomCropImgMask: + size: [ 640,640 ] + main_key: gt_text + crop_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] + - NormalizeImage: + scale: 1./255. + mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 16 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 736 + limit_type: min + - NormalizeImage: + scale: 1./255. 
+ mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'shape', 'polys', 'ignore_tags' ] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 \ No newline at end of file diff --git a/configs/det/det_ppocr_v3.yml b/configs/det/det_ppocr_v3.yml new file mode 100644 index 0000000..0e8af77 --- /dev/null +++ b/configs/det/det_ppocr_v3.yml @@ -0,0 +1,163 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/ch_PP-OCR_V3_det/ + save_epoch_step: 100 + eval_batch_step: + - 0 + - 400 + cal_metric_during_train: false + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt + distributed: true + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: True + Neck: + name: RSEFPN + out_channels: 96 + shortcut: True + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 2 + regularizer: + name: L2 + factor: 5.0e-05 +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DetMetric + main_indicator: hmean +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 960 + - 960 + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. + mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 8 + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: null + - NormalizeImage: + scale: 1./255. 
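+ # DetResizeForTest is given no arguments above, so it falls back to the
+ # framework defaults; mean/std are the usual ImageNet statistics.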
+ mean: + - 0.485 + - 0.456 + - 0.406 + std: + - 0.229 + - 0.224 + - 0.225 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 diff --git a/configs/det/det_r50_db++_icdar15.yml b/configs/det/det_r50_db++_icdar15.yml new file mode 100644 index 0000000..e0cd601 --- /dev/null +++ b/configs/det/det_r50_db++_icdar15.yml @@ -0,0 +1,163 @@ +Global: + debug: false + use_gpu: true + epoch_num: 1000 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_r50_icdar15/ + save_epoch_step: 200 + eval_batch_step: + - 0 + - 2000 + cal_metric_during_train: false + pretrained_model: ./pretrain_models/ResNet50_dcn_asf_synthtext_pretrained + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt +Architecture: + model_type: det + algorithm: DB++ + Transform: null + Backbone: + name: ResNet + layers: 50 + dcn_stage: [False, True, True, True] + Neck: + name: DBFPN + out_channels: 256 + use_asf: True + Head: + name: DBHead + k: 50 +Loss: + name: DBLoss + balance_loss: true + main_loss_type: BCELoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: DecayLearningRate + learning_rate: 0.007 + epochs: 1000 + factor: 0.9 + end_lr: 0 + weight_decay: 0.0001 +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.6 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DetMetric + main_indicator: hmean +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: + - 1.0 + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 640 + - 640 + max_tries: 10 + keep_ratio: true + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - NormalizeImage: + scale: 1./255. + mean: + - 0.48109378172549 + - 0.45752457890196 + - 0.40787054090196 + std: + - 1.0 + - 1.0 + - 1.0 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 4 + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: + image_shape: + - 1152 + - 2048 + - NormalizeImage: + scale: 1./255. 
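+ # std of 1.0 below means only the channel means are subtracted (no variance
+ # scaling), matching the normalization of the original DB++ release.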
+ mean: + - 0.48109378172549 + - 0.45752457890196 + - 0.40787054090196 + std: + - 1.0 + - 1.0 + - 1.0 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 +profiler_options: null diff --git a/configs/det/det_r50_db++_td_tr.yml b/configs/det/det_r50_db++_td_tr.yml new file mode 100644 index 0000000..65021bb --- /dev/null +++ b/configs/det/det_r50_db++_td_tr.yml @@ -0,0 +1,166 @@ +Global: + debug: false + use_gpu: true + epoch_num: 1000 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_r50_td_tr/ + save_epoch_step: 200 + eval_batch_step: + - 0 + - 2000 + cal_metric_during_train: false + pretrained_model: ./pretrain_models/ResNet50_dcn_asf_synthtext_pretrained + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./checkpoints/det_db/predicts_db.txt +Architecture: + model_type: det + algorithm: DB++ + Transform: null + Backbone: + name: ResNet + layers: 50 + dcn_stage: [False, True, True, True] + Neck: + name: DBFPN + out_channels: 256 + use_asf: True + Head: + name: DBHead + k: 50 +Loss: + name: DBLoss + balance_loss: true + main_loss_type: BCELoss + alpha: 5 + beta: 10 + ohem_ratio: 3 +Optimizer: + name: Momentum + momentum: 0.9 + lr: + name: DecayLearningRate + learning_rate: 0.007 + epochs: 1000 + factor: 0.9 + end_lr: 0 + weight_decay: 0.0001 +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.5 + max_candidates: 1000 + unclip_ratio: 1.5 +Metric: + name: DetMetric + main_indicator: hmean +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: + - ./train_data/TD_TR/TD500/train_gt_labels.txt + - ./train_data/TD_TR/TR400/gt_labels.txt + ratio_list: + - 1.0 + - 1.0 + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - IaaAugment: + augmenter_args: + - type: Fliplr + args: + p: 0.5 + - type: Affine + args: + rotate: + - -10 + - 10 + - type: Resize + args: + size: + - 0.5 + - 3 + - EastRandomCropData: + size: + - 640 + - 640 + max_tries: 10 + keep_ratio: true + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - NormalizeImage: + scale: 1./255. + mean: + - 0.48109378172549 + - 0.45752457890196 + - 0.40787054090196 + std: + - 1.0 + - 1.0 + - 1.0 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - threshold_map + - threshold_mask + - shrink_map + - shrink_mask + loader: + shuffle: true + drop_last: false + batch_size_per_card: 4 + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: + - ./train_data/TD_TR/TD500/test_gt_labels.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - DetLabelEncode: null + - DetResizeForTest: + image_shape: + - 736 + - 736 + keep_ratio: True + - NormalizeImage: + scale: 1./255. 
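+ # same mean-subtraction-only normalization as the ICDAR15 DB++ config;
+ # keep_ratio: True above preserves the aspect ratio instead of forcing 736x736.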
+ mean: + - 0.48109378172549 + - 0.45752457890196 + - 0.40787054090196 + std: + - 1.0 + - 1.0 + - 1.0 + order: hwc + - ToCHWImage: null + - KeepKeys: + keep_keys: + - image + - shape + - polys + - ignore_tags + loader: + shuffle: false + drop_last: false + batch_size_per_card: 1 + num_workers: 2 +profiler_options: null diff --git a/configs/det/det_r50_vd_db.yml b/configs/det/det_r50_vd_db.yml new file mode 100644 index 0000000..b1c4a04 --- /dev/null +++ b/configs/det/det_r50_vd_db.yml @@ -0,0 +1,130 @@ +Global: + use_gpu: true + epoch_num: 1200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_r50_vd/ + save_epoch_step: 1200 + # evaluation is run every 2000 iterations + eval_batch_step: [0,2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_db/predicts_db.txt + +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: ResNet_vd + layers: 50 + Neck: + name: DBFPN + out_channels: 256 + Head: + name: DBHead + k: 50 + +Loss: + name: DBLoss + balance_loss: true + main_loss_type: DiceLoss + alpha: 5 + beta: 10 + ohem_ratio: 3 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: DBPostProcess + thresh: 0.3 + box_thresh: 0.7 + max_candidates: 1000 + unclip_ratio: 1.5 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - IaaAugment: + augmenter_args: + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [-10, 10] } } + - { 'type': Resize, 'args': { 'size': [0.5, 3] } } + - EastRandomCropData: + size: [640, 640] + max_tries: 50 + keep_ratio: true + - MakeBorderMap: + shrink_ratio: 0.4 + thresh_min: 0.3 + thresh_max: 0.7 + - MakeShrinkMap: + shrink_ratio: 0.4 + min_text_size: 8 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'threshold_map', 'threshold_mask', 'shrink_map', 'shrink_mask'] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 16 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + image_shape: [736, 1280] + - NormalizeImage: + scale: 1./255. 
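+ # eval runs at a fixed 736x1280 input; DBPostProcess maps boxes back to the
+ # source image using the resize ratios carried in 'shape' below.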
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 \ No newline at end of file diff --git a/configs/det/det_r50_vd_dcn_fce_ctw.yml b/configs/det/det_r50_vd_dcn_fce_ctw.yml new file mode 100644 index 0000000..3a4075b --- /dev/null +++ b/configs/det/det_r50_vd_dcn_fce_ctw.yml @@ -0,0 +1,139 @@ +Global: + use_gpu: true + epoch_num: 1500 + log_smooth_window: 20 + print_batch_step: 20 + save_model_dir: ./output/det_r50_dcn_fce_ctw/ + save_epoch_step: 100 + # evaluation is run every 835 iterations + eval_batch_step: [0, 835] + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_fce/predicts_fce.txt + + +Architecture: + model_type: det + algorithm: FCE + Transform: + Backbone: + name: ResNet_vd + layers: 50 + dcn_stage: [False, True, True, True] + out_indices: [1,2,3] + Neck: + name: FCEFPN + out_channels: 256 + has_extra_convs: False + extra_stage: 0 + Head: + name: FCEHead + fourier_degree: 5 +Loss: + name: FCELoss + fourier_degree: 5 + num_sample: 50 + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0001 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: FCEPostProcess + scales: [8, 16, 32] + alpha: 1.0 + beta: 1.0 + fourier_degree: 5 + box_type: 'poly' + +Metric: + name: DetFCEMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ctw1500/imgs/ + label_file_list: + - ./train_data/ctw1500/imgs/training.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + ignore_orientation: True + - DetLabelEncode: # Class handling label + - ColorJitter: + brightness: 0.142 + saturation: 0.5 + contrast: 0.5 + - RandomScaling: + - RandomCropFlip: + crop_ratio: 0.5 + - RandomCropPolyInstances: + crop_ratio: 0.8 + min_side_ratio: 0.3 + - RandomRotatePolyInstances: + rotate_ratio: 0.5 + max_angle: 30 + pad_with_fixed_color: False + - SquareResizePad: + target_size: 800 + pad_ratio: 0.6 + - IaaAugment: + augmenter_args: + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - FCENetTargets: + fourier_degree: 5 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'p3_maps', 'p4_maps', 'p5_maps'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 6 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ctw1500/imgs/ + label_file_list: + - ./train_data/ctw1500/imgs/test.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + ignore_orientation: True + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_type: 'min' + limit_side_len: 736 + - NormalizeImage: + scale: 1./255. 
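+ # the extra Pad step below stride-aligns the normalized image (ppocr pads to a
+ # multiple of 32 by default) so every FPN stride divides the input size evenly.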
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - Pad: + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/det/det_r50_vd_east.yml b/configs/det/det_r50_vd_east.yml new file mode 100644 index 0000000..745a266 --- /dev/null +++ b/configs/det/det_r50_vd_east.yml @@ -0,0 +1,110 @@ +Global: + use_gpu: true + epoch_num: 10000 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/east_r50_vd/ + save_epoch_step: 1000 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [4000, 5000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_pretrained/ + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + save_res_path: ./output/det_east/predicts_east.txt + +Architecture: + model_type: det + algorithm: EAST + Transform: + Backbone: + name: ResNet_vd + layers: 50 + Neck: + name: EASTFPN + model_name: large + Head: + name: EASTHead + model_name: large + +Loss: + name: EASTLoss + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + # name: Cosine + learning_rate: 0.001 + # warmup_epoch: 0 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: EASTPostProcess + score_thresh: 0.8 + cover_thresh: 0.1 + nms_thresh: 0.2 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [1.0] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - EASTProcessTrain: + image_shape: [512, 512] + background_ratio: 0.125 + min_crop_side_ratio: 0.1 + min_text_size: 10 + - KeepKeys: + keep_keys: ['image', 'score_map', 'geo_map', 'training_mask'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 2400 + limit_type: max + - NormalizeImage: + scale: 1./255. 
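+ # limit_type 'max' above caps the longer side at 2400 px, so oversized pages
+ # are downscaled rather than fed to the network at full resolution.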
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/det/det_r50_vd_pse.yml b/configs/det/det_r50_vd_pse.yml new file mode 100644 index 0000000..ac00718 --- /dev/null +++ b/configs/det/det_r50_vd_pse.yml @@ -0,0 +1,134 @@ +Global: + use_gpu: true + epoch_num: 600 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_r50_vd_pse/ + save_epoch_step: 600 + # evaluation is run every 125 iterations + eval_batch_step: [ 0,125 ] + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained + checkpoints: #./output/det_r50_vd_pse_batch8_ColorJitter/best_accuracy + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_pse/predicts_pse.txt + +Architecture: + model_type: det + algorithm: PSE + Transform: + Backbone: + name: ResNet_vd + layers: 50 + Neck: + name: FPN + out_channels: 256 + Head: + name: PSEHead + hidden_dim: 256 + out_channels: 7 + +Loss: + name: PSELoss + alpha: 0.7 + ohem_ratio: 3 + kernel_sample_mask: pred + reduction: none + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Step + learning_rate: 0.0001 + step_size: 200 + gamma: 0.1 + regularizer: + name: 'L2' + factor: 0.0005 + +PostProcess: + name: PSEPostProcess + thresh: 0 + box_thresh: 0.85 + min_area: 16 + box_type: box # 'box' or 'poly' + scale: 1 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - ColorJitter: + brightness: 0.12549019607843137 + saturation: 0.5 + - IaaAugment: + augmenter_args: + - { 'type': Resize, 'args': { 'size': [ 0.5, 3 ] } } + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [ -10, 10 ] } } + - MakePseGt: + kernel_num: 7 + min_shrink_ratio: 0.4 + size: 640 + - RandomCropImgMask: + size: [ 640,640 ] + main_key: gt_text + crop_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] + - NormalizeImage: + scale: 1./255. + mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 736 + limit_type: min + - NormalizeImage: + scale: 1./255. 
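+ # 'shape' keeps the source size plus resize ratios so kernels can be projected
+ # back to original coordinates; 'ignore_tags' marks '###' don't-care boxes
+ # that DetMetric excludes from hmean.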
+ mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'shape', 'polys', 'ignore_tags' ] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 \ No newline at end of file diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml new file mode 100644 index 0000000..a989bc8 --- /dev/null +++ b/configs/det/det_r50_vd_sast_icdar15.yml @@ -0,0 +1,110 @@ +Global: + use_gpu: true + epoch_num: 5000 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/sast_r50_vd_ic15/ + save_epoch_step: 1000 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [4000, 5000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + save_res_path: ./output/sast_r50_vd_ic15/predicts_sast.txt + +Architecture: + model_type: det + algorithm: SAST + Transform: + Backbone: + name: ResNet_SAST + layers: 50 + Neck: + name: SASTFPN + with_cab: True + Head: + name: SASTHead + +Loss: + name: SASTLoss + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + # name: Cosine + learning_rate: 0.001 + # warmup_epoch: 0 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: SASTPostProcess + score_thresh: 0.5 + sample_pts_num: 2 + nms_thresh: 0.2 + expand_scale: 1.0 + shrink_ratio_of_width: 0.3 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: [./train_data/icdar2013/train_label_json.txt, ./train_data/icdar2015/train_label_json.txt, ./train_data/icdar17_mlt_latin/train_label_json.txt, ./train_data/coco_text_icdar_4pts/train_label_json.txt] + ratio_list: [0.1, 0.45, 0.3, 0.15] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - SASTProcessTrain: + image_shape: [512, 512] + min_crop_side_ratio: 0.3 + min_crop_size: 24 + min_text_size: 4 + max_text_size: 512 + - KeepKeys: + keep_keys: ['image', 'score_map', 'border_map', 'training_mask', 'tvo_map', 'tco_map'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 4 + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + resize_long: 1536 + - NormalizeImage: + scale: 1./255. 
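+ # resize_long scales the longer side to 1536; batch_size_per_card must stay 1
+ # because each eval image ends up with a different tensor shape.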
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml new file mode 100644 index 0000000..e040c42 --- /dev/null +++ b/configs/det/det_r50_vd_sast_totaltext.yml @@ -0,0 +1,110 @@ +Global: + use_gpu: true + epoch_num: 5000 + log_smooth_window: 20 + print_batch_step: 2 + save_model_dir: ./output/sast_r50_vd_tt/ + save_epoch_step: 1000 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [4000, 5000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + load_static_weights: True + cal_metric_during_train: False + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + save_res_path: ./output/sast_r50_vd_tt/predicts_sast.txt + +Architecture: + model_type: det + algorithm: SAST + Transform: + Backbone: + name: ResNet_SAST + layers: 50 + Neck: + name: SASTFPN + with_cab: True + Head: + name: SASTHead + +Loss: + name: SASTLoss + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + # name: Cosine + learning_rate: 0.001 + # warmup_epoch: 0 + regularizer: + name: 'L2' + factor: 0 + +PostProcess: + name: SASTPostProcess + score_thresh: 0.5 + sample_pts_num: 6 + nms_thresh: 0.2 + expand_scale: 1.2 + shrink_ratio_of_width: 0.2 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: [./train_data/art_latin_icdar_14pt/train_no_tt_test/train_label_json.txt, ./train_data/total_text_icdar_14pt/train_label_json.txt] + ratio_list: [0.5, 0.5] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - SASTProcessTrain: + image_shape: [512, 512] + min_crop_side_ratio: 0.3 + min_crop_size: 24 + min_text_size: 4 + max_text_size: 512 + - KeepKeys: + keep_keys: ['image', 'score_map', 'border_map', 'training_mask', 'tvo_map', 'tco_map'] # dataloader will return list in this order + loader: + shuffle: True + drop_last: False + batch_size_per_card: 4 + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: + - ./train_data/total_text_icdar_14pt/test_label_json.txt + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + resize_long: 768 + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'shape', 'polys', 'ignore_tags'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/e2e/e2e_r50_vd_pg.yml b/configs/e2e/e2e_r50_vd_pg.yml new file mode 100644 index 0000000..0a232f7 --- /dev/null +++ b/configs/e2e/e2e_r50_vd_pg.yml @@ -0,0 +1,114 @@ +Global: + use_gpu: True + epoch_num: 600 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/pgnet_r50_vd_totaltext/ + save_epoch_step: 10 + # evaluation is run every 0 iterationss after the 1000th iteration + eval_batch_step: [ 0, 1000 ] + # 1. 
If pretrained_model is saved in static mode, such as classification pretrained model + # from static branch, load_static_weights must be set as True. + # 2. If you want to finetune the pretrained models we provide in the docs, + # you should set load_static_weights as False. + load_static_weights: False + cal_metric_during_train: False + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + valid_set: totaltext # two mode: totaltext valid curved words, partvgg valid non-curved words + save_res_path: ./output/pgnet_r50_vd_totaltext/predicts_pgnet.txt + character_dict_path: ppocr/utils/ic15_dict.txt + character_type: EN + max_text_length: 50 # the max length in seq + max_text_nums: 30 # the max seq nums in a pic + tcl_len: 64 + +Architecture: + model_type: e2e + algorithm: PGNet + Transform: + Backbone: + name: ResNet + layers: 50 + Neck: + name: PGFPN + Head: + name: PGHead + +Loss: + name: PGLoss + tcl_bs: 64 + max_text_length: 50 # the same as Global: max_text_length + max_text_nums: 30 # the same as Global:max_text_nums + pad_num: 36 # the length of dict for pad + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0 + + +PostProcess: + name: PGPostProcess + score_thresh: 0.5 +Metric: + name: E2EMetric + character_dict_path: ppocr/utils/ic15_dict.txt + main_indicator: f_score_e2e + +Train: + dataset: + name: PGDataSet + label_file_list: [.././train_data/total_text/train/] + ratio_list: [1.0] + data_format: icdar #two data format: icdar/textnet + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - PGProcessTrain: + batch_size: 14 # same as loader: batch_size_per_card + min_crop_size: 24 + min_text_size: 4 + max_text_size: 512 + - KeepKeys: + keep_keys: [ 'images', 'tcl_maps', 'tcl_label_maps', 'border_maps','direction_maps', 'training_masks', 'label_list', 'pos_list', 'pos_mask' ] # dataloader will return list in this order + loader: + shuffle: True + drop_last: True + batch_size_per_card: 14 + num_workers: 16 + +Eval: + dataset: + name: PGDataSet + data_dir: ./train_data/ + label_file_list: [./train_data/total_text/test/] + transforms: + - DecodeImage: # load image + img_mode: RGB + channel_first: False + - E2ELabelEncode: + - E2EResizeForTest: + max_side_len: 768 + - NormalizeImage: + scale: 1./255. 
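+ # besides the usual detection keys, end-to-end eval keeps 'strs' (ground-truth
+ # transcriptions) and 'tags' so E2EMetric can score recognition as well.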
+ mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'shape', 'polys', 'strs', 'tags' ] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 2 \ No newline at end of file diff --git a/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml new file mode 100644 index 0000000..2d4261f --- /dev/null +++ b/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v3 + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml b/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml new file mode 100644 index 0000000..e7cbae5 --- /dev/null +++ b/configs/rec/PP-OCRv3/ch_PP-OCRv3_rec_distillation.yml @@ -0,0 +1,205 @@ +Global: + debug: false + use_gpu: true + epoch_num: 800 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v3_distillation + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: 
./output/rec/predicts_ppocrv3_distillation.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Piecewise + decay_epochs : [700, 800] + values : [0.0005, 0.00005] + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: &model_type "rec" + name: DistillationModel + algorithm: Distillation + Models: + Teacher: + pretrained: + freeze_params: false + return_all_feats: true + model_type: *model_type + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + fc_decay: 0.00001 + - SARHead: + enc_dim: 512 + max_text_length: *max_text_length + Student: + pretrained: + freeze_params: false + return_all_feats: true + model_type: *model_type + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + fc_decay: 0.00001 + - SARHead: + enc_dim: 512 + max_text_length: *max_text_length +Loss: + name: CombinedLoss + loss_config_list: + - DistillationDMLLoss: + weight: 1.0 + act: "softmax" + use_log: true + model_name_pairs: + - ["Student", "Teacher"] + key: head_out + multi_head: True + dis_head: ctc + name: dml_ctc + - DistillationDMLLoss: + weight: 0.5 + act: "softmax" + use_log: true + model_name_pairs: + - ["Student", "Teacher"] + key: head_out + multi_head: True + dis_head: sar + name: dml_sar + - DistillationDistanceLoss: + weight: 1.0 + mode: "l2" + model_name_pairs: + - ["Student", "Teacher"] + key: backbone_out + - DistillationCTCLoss: + weight: 1.0 + model_name_list: ["Student", "Teacher"] + key: head_out + multi_head: True + - DistillationSARLoss: + weight: 1.0 + model_name_list: ["Student", "Teacher"] + key: head_out + multi_head: True + +PostProcess: + name: DistillationCTCLabelDecode + model_name: ["Student", "Teacher"] + key: head_out + multi_head: True + +Metric: + name: DistillationMetric + base_metric_name: RecMetric + main_indicator: acc + key: "Student" + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml new file mode 100644 index 0000000..af8b7ba --- /dev/null +++ b/configs/rec/PP-OCRv3/en_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 
500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_en_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/en_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_en.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/.gitkeep b/configs/rec/PP-OCRv3/multi_language/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml new file mode 100644 index 0000000..55cee5b --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/arabic_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_arabic_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/arabic_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_arabic.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + 
hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/chinese_cht_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/chinese_cht_PP-OCRv3_rec.yml new file mode 100644 index 0000000..135a360 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/chinese_cht_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_chinese_cht_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/chinese_cht_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_chinese_cht.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - 
RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml new file mode 100644 index 0000000..5365152 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/cyrillic_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_cyrillic_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/cyrillic_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_cyrillic.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml new file mode 100644 index 0000000..023b4af --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/devanagari_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_devanagari_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/devanagari_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + 
save_res_path: ./output/rec/predicts_ppocrv3_devanagari.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/japan_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/japan_PP-OCRv3_rec.yml new file mode 100644 index 0000000..065b327 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/japan_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_japan_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/japan_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_japan.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - 
RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml new file mode 100644 index 0000000..5edb298 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/ka_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_ka_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/ka_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_ka.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/korean_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/korean_PP-OCRv3_rec.yml new file mode 100644 index 0000000..eebeb09 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/korean_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: 
./output/v3_korean_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/korean_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_korean.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml new file mode 100644 index 0000000..4f8e25b --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/latin_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_latin_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/latin_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_latin.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + 
name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml new file mode 100644 index 0000000..15b8624 --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/ta_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_ta_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/ta_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_ta.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git 
a/configs/rec/PP-OCRv3/multi_language/te_PP-OCRv3_rec.yml b/configs/rec/PP-OCRv3/multi_language/te_PP-OCRv3_rec.yml new file mode 100644 index 0000000..9074c8d --- /dev/null +++ b/configs/rec/PP-OCRv3/multi_language/te_PP-OCRv3_rec.yml @@ -0,0 +1,126 @@ +Global: + debug: false + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/v3_te_mobile + save_epoch_step: 3 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/dict/te_dict.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3_te.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + Backbone: + name: MobileNetV1Enhance + scale: 0.5 + last_conv_stride: [1, 2] + last_pool_type: avg + Neck: + name: SequenceEncoder + encoder_type: svtr + dims: 64 + depth: 2 + hidden_dims: 120 + use_guide: True + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - SARLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + ignore_space: False + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + - RecAug: + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: true + batch_size_per_card: 128 + drop_last: true + num_workers: 4 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_sar + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml new file mode 100644 index 0000000..158815e --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml @@ -0,0 +1,138 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v4 + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: pytorchocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: true + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_LCNet + Transform: + Backbone: + name: PPLCNetV3 + scale: 0.95 + Head: + name: MultiHead + 
head_list: + - CTCHead: + Neck: + name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + sampler: + name: MultiScaleSampler + scales: [[320, 32], [320, 48], [320, 64]] + first_bs: &bs 192 + fix_bs: false + divided_factor: [8, 16] # w, h + is_training: True + loader: + shuffle: true + batch_size_per_card: *bs + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ampO2_ultra.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ampO2_ultra.yml new file mode 100644 index 0000000..475c551 --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ampO2_ultra.yml @@ -0,0 +1,140 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v4 + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + use_amp: True + amp_level: O2 + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_LCNet + Transform: + Backbone: + name: PPLCNetV3 + scale: 0.95 + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: 
NRTRLabelEncode
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  sampler:
+    name: MultiScaleSampler
+    scales: [[320, 32], [320, 48], [320, 64]]
+    first_bs: &bs 384
+    fix_bs: false
+    divided_factor: [8, 16] # w, h
+    is_training: True
+  loader:
+    shuffle: true
+    batch_size_per_card: *bs
+    drop_last: true
+    num_workers: 16
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data
+    label_file_list:
+    - ./train_data/val_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - RecResizeImg:
+        image_shape: [3, 48, 320]
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 128
+    num_workers: 16
diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml
new file mode 100644
index 0000000..28901ed
--- /dev/null
+++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml
@@ -0,0 +1,132 @@
+Global:
+  debug: false
+  use_gpu: true
+  epoch_num: 200
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_ppocr_v4
+  save_epoch_step: 10
+  eval_batch_step: [0, 2000]
+  cal_metric_during_train: true
+  pretrained_model:
+  checkpoints:
+  save_inference_dir:
+  use_visualdl: false
+  infer_img: doc/imgs_words/ch/word_1.jpg
+  character_dict_path: ppocr/utils/ppocr_keys_v1.txt
+  max_text_length: &max_text_length 25
+  infer_mode: true
+  use_space_char: true
+  distributed: true
+  save_res_path: ./output/rec/predicts_ppocrv3.txt
+
+
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Cosine
+    learning_rate: 0.001
+    warmup_epoch: 5
+  regularizer:
+    name: L2
+    factor: 3.0e-05
+
+
+Architecture:
+  model_type: rec
+  algorithm: SVTR_LCNet
+  Transform:
+  Backbone:
+    name: PPLCNetV3
+    scale: 0.95
+  Neck:
+    name: svtr
+    dims: 120
+    depth: 2
+    hidden_dims: 120
+    kernel_size: [1, 3]
+    use_guide: True
+  Head:
+    name: CTCHead
+    fc_decay: 0.00004
+
+Loss:
+  name: MultiLoss
+  loss_config_list:
+    - CTCLoss:
+    - NRTRLoss:
+
+PostProcess:
+  name: CTCLabelDecode
+
+Metric:
+  name: RecMetric
+  main_indicator: acc
+
+Train:
+  dataset:
+    name: MultiScaleDataSet
+    ds_width: false
+    data_dir: ./train_data/
+    ext_op_transform_idx: 1
+    label_file_list:
+    - ./train_data/train_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - RecConAug:
+        prob: 0.5
+        ext_data_num: 2
+        image_shape: [48, 320, 3]
+        max_text_length: *max_text_length
+    - RecAug:
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  sampler:
+    name: MultiScaleSampler
+    scales: [[320, 32], [320, 48], [320, 64]]
+    first_bs: &bs 192
+    fix_bs: false
+    divided_factor: [8, 16] # w, h
+    is_training: True
+  loader:
+    shuffle: true
+    batch_size_per_card: *bs
+    drop_last: true
+    num_workers: 8
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data
+    label_file_list:
+    - ./train_data/val_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - RecResizeImg:
+        image_shape: [3, 48, 320]
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 128
+    num_workers: 4
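These recognition configs rely on YAML anchor/alias pairs (&max_text_length / *max_text_length, &bs / *bs), which PyYAML resolves at parse time; an alias therefore only works if its anchor is defined earlier in the same file. A minimal sketch of how one of these configs is consumed, assuming only PyYAML and the file paths added by this patch:

    import yaml

    # Load the CTC variant added above; safe_load resolves the anchor/alias
    # pairs while parsing, so *bs and *max_text_length become plain values.
    with open('configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_ctc.yml', 'rb') as f:
        cfg = yaml.safe_load(f)

    print(cfg['Architecture']['algorithm'])               # SVTR_LCNet
    print(cfg['Global']['max_text_length'])               # 25
    print(cfg['Train']['loader']['batch_size_per_card'])  # 192 (via *bs)

The distillation config that follows aliases *max_text_length twice, which is why its Global section must carry the &max_text_length anchor.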
diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_distill.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_distill.yml
new file mode 100644
index 0000000..f613ee5
--- /dev/null
+++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_distill.yml
@@ -0,0 +1,231 @@
+Global:
+  debug: false
+  use_gpu: true
+  epoch_num: 200
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_dkd_400w_svtr_ctc_lcnet_blank_dkd0.1/
+  save_epoch_step: 40
+  eval_batch_step:
+  - 0
+  - 2000
+  cal_metric_during_train: true
+  pretrained_model: null
+  checkpoints: ./output/rec_dkd_400w_svtr_ctc_lcnet_blank_dkd0.1/latest
+  save_inference_dir: null
+  use_visualdl: false
+  infer_img: doc/imgs_words/ch/word_1.jpg
+  character_dict_path: ppocr/utils/ppocr_keys_v1.txt
+  max_text_length: &max_text_length 25
+  infer_mode: false
+  use_space_char: true
+  distributed: true
+  save_res_path: ./output/rec/predicts_ppocrv3.txt
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Cosine
+    learning_rate: 0.001
+    warmup_epoch: 2
+  regularizer:
+    name: L2
+    factor: 3.0e-05
+Architecture:
+  model_type: rec
+  name: DistillationModel
+  algorithm: Distillation
+  Models:
+    Teacher:
+      pretrained:
+      freeze_params: true
+      return_all_feats: true
+      model_type: rec
+      algorithm: SVTR
+      Transform: null
+      Backbone:
+        name: SVTRNet
+        img_size:
+        - 48
+        - 320
+        out_char_num: 40
+        out_channels: 192
+        patch_merging: Conv
+        embed_dim:
+        - 64
+        - 128
+        - 256
+        depth:
+        - 3
+        - 6
+        - 3
+        num_heads:
+        - 2
+        - 4
+        - 8
+        mixer:
+        - Conv
+        - Conv
+        - Conv
+        - Conv
+        - Conv
+        - Conv
+        - Global
+        - Global
+        - Global
+        - Global
+        - Global
+        - Global
+        local_mixer:
+        - - 5
+          - 5
+        - - 5
+          - 5
+        - - 5
+          - 5
+        last_stage: false
+        prenorm: true
+      Head:
+        name: MultiHead
+        head_list:
+        - CTCHead:
+            Neck:
+              name: svtr
+              dims: 120
+              depth: 2
+              hidden_dims: 120
+              kernel_size: [1, 3]
+              use_guide: True
+            Head:
+              fc_decay: 0.00001
+        - NRTRHead:
+            nrtr_dim: 384
+            max_text_length: *max_text_length
+    Student:
+      pretrained:
+      freeze_params: false
+      return_all_feats: true
+      model_type: rec
+      algorithm: SVTR
+      Transform: null
+      Backbone:
+        name: PPLCNetV3
+        scale: 0.95
+      Head:
+        name: MultiHead
+        head_list:
+        - CTCHead:
+            Neck:
+              name: svtr
+              dims: 120
+              depth: 2
+              hidden_dims: 120
+              kernel_size: [1, 3]
+              use_guide: True
+            Head:
+              fc_decay: 0.00001
+        - NRTRHead:
+            nrtr_dim: 384
+            max_text_length: *max_text_length
+Loss:
+  name: CombinedLoss
+  loss_config_list:
+  - DistillationDKDLoss:
+      weight: 0.1
+      model_name_pairs:
+      - - Student
+        - Teacher
+      key: head_out
+      multi_head: true
+      alpha: 1.0
+      beta: 2.0
+      dis_head: gtc
+      name: dkd
+  - DistillationCTCLoss:
+      weight: 1.0
+      model_name_list:
+      - Student
+      key: head_out
+      multi_head: true
+  - DistillationNRTRLoss:
+      weight: 1.0
+      smoothing: false
+      model_name_list:
+      - Student
+      key: head_out
+      multi_head: true
+  - DistillCTCLogits:
+      weight: 1.0
+      reduction: mean
+      model_name_pairs:
+      - - Student
+        - Teacher
+      key: head_out
+PostProcess:
+  name: DistillationCTCLabelDecode
+  model_name:
+  - Student
+  key: head_out
+  multi_head: true
+Metric:
+  name: DistillationMetric
+  base_metric_name: RecMetric
+  main_indicator: acc
+  key: Student
+  ignore_space: false
+Train:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data/
+    label_file_list:
+    - ./train_data/train_list.txt
+    ratio_list:
+    - 1.0
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - RecAug:
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  loader:
+    shuffle: true
+    batch_size_per_card: 128
+    drop_last: true
+    num_workers: 8
+    use_shared_memory: true
+Eval:
+  dataset:
+    name:
SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 +profiler_options: null diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_fp32_ultra.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_fp32_ultra.yml new file mode 100644 index 0000000..8c26730 --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_fp32_ultra.yml @@ -0,0 +1,138 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v4 + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_LCNet + Transform: + Backbone: + name: PPLCNetV3 + scale: 0.95 + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + sampler: + name: MultiScaleSampler + scales: [[320, 32], [320, 48], [320, 64]] + first_bs: &bs 192 + fix_bs: false + divided_factor: [8, 16] # w, h + is_training: True + loader: + shuffle: true + batch_size_per_card: *bs + drop_last: true + num_workers: 16 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 16 diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet.yml new file mode 100644 index 0000000..c181da6 --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet.yml @@ -0,0 +1,137 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: 
./output/rec_ppocr_v4_hgnet + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: pytorchocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_HGNet + Transform: + Backbone: + name: PPHGNet_small + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + sampler: + name: MultiScaleSampler + scales: [[320, 32], [320, 48], [320, 64]] + first_bs: &bs 128 + fix_bs: false + divided_factor: [8, 16] # w, h + is_training: True + loader: + shuffle: true + batch_size_per_card: *bs + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 4 diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_ampO2_ultra.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_ampO2_ultra.yml new file mode 100644 index 0000000..4303521 --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_ampO2_ultra.yml @@ -0,0 +1,139 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v4_hgnet + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + use_amp: True + amp_level: O2 + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_HGNet + Transform: + Backbone: + name: PPHGNet_small + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + 
name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + sampler: + name: MultiScaleSampler + scales: [[320, 32], [320, 48], [320, 64]] + first_bs: &bs 256 + fix_bs: false + divided_factor: [8, 16] # w, h + is_training: True + loader: + shuffle: true + batch_size_per_card: *bs + drop_last: true + num_workers: 16 +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: + - ./train_data/val_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - RecResizeImg: + image_shape: [3, 48, 320] + - KeepKeys: + keep_keys: + - image + - label_ctc + - label_gtc + - length + - valid_ratio + loader: + shuffle: false + drop_last: false + batch_size_per_card: 128 + num_workers: 16 diff --git a/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_fp32_ultra.yml b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_fp32_ultra.yml new file mode 100644 index 0000000..ee9ebca --- /dev/null +++ b/configs/rec/PP-OCRv4/ch_PP-OCRv4_rec_hgnet_fp32_ultra.yml @@ -0,0 +1,137 @@ +Global: + debug: false + use_gpu: true + epoch_num: 200 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_ppocr_v4_hgnet + save_epoch_step: 10 + eval_batch_step: [0, 2000] + cal_metric_during_train: true + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: false + infer_img: doc/imgs_words/ch/word_1.jpg + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: &max_text_length 25 + infer_mode: false + use_space_char: true + distributed: true + save_res_path: ./output/rec/predicts_ppocrv3.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + warmup_epoch: 5 + regularizer: + name: L2 + factor: 3.0e-05 + + +Architecture: + model_type: rec + algorithm: SVTR_HGNet + Transform: + Backbone: + name: PPHGNet_small + Head: + name: MultiHead + head_list: + - CTCHead: + Neck: + name: svtr + dims: 120 + depth: 2 + hidden_dims: 120 + kernel_size: [1, 3] + use_guide: True + Head: + fc_decay: 0.00001 + - NRTRHead: + nrtr_dim: 384 + max_text_length: *max_text_length + +Loss: + name: MultiLoss + loss_config_list: + - CTCLoss: + - NRTRLoss: + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: MultiScaleDataSet + ds_width: false + data_dir: ./train_data/ + ext_op_transform_idx: 1 + label_file_list: + - ./train_data/train_list.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecConAug: + prob: 0.5 + ext_data_num: 2 + image_shape: [48, 320, 3] + max_text_length: *max_text_length + - RecAug: + - MultiLabelEncode: + gtc_encode: NRTRLabelEncode + - KeepKeys: + keep_keys: + - image + - label_ctc 
+        - label_gtc
+        - length
+        - valid_ratio
+  sampler:
+    name: MultiScaleSampler
+    scales: [[320, 32], [320, 48], [320, 64]]
+    first_bs: &bs 256
+    fix_bs: false
+    divided_factor: [8, 16] # w, h
+    is_training: True
+  loader:
+    shuffle: true
+    batch_size_per_card: *bs
+    drop_last: true
+    num_workers: 16
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data
+    label_file_list:
+    - ./train_data/val_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - RecResizeImg:
+        image_shape: [3, 48, 320]
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 128
+    num_workers: 16
diff --git a/configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml b/configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml
new file mode 100644
index 0000000..b355fb4
--- /dev/null
+++ b/configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml
@@ -0,0 +1,150 @@
+Global:
+  debug: false
+  use_gpu: true
+  epoch_num: 50
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_ppocr_v4
+  save_epoch_step: 10
+  eval_batch_step:
+  - 0
+  - 2000
+  cal_metric_during_train: true
+  pretrained_model: null
+  checkpoints: null
+  save_inference_dir: null
+  use_visualdl: false
+  infer_img: doc/imgs_words/ch/word_1.jpg
+  character_dict_path: pytorchocr/utils/en_dict.txt
+  max_text_length: 25
+  infer_mode: false
+  use_space_char: true
+  distributed: true
+  save_res_path: ./output/rec/predicts_ppocrv3.txt
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Cosine
+    learning_rate: 0.0005
+    warmup_epoch: 5
+  regularizer:
+    name: L2
+    factor: 3.0e-05
+Architecture:
+  model_type: rec
+  algorithm: SVTR_LCNet
+  Transform: null
+  Backbone:
+    name: PPLCNetV3
+    scale: 0.95
+  Head:
+    name: MultiHead
+    head_list:
+    - CTCHead:
+        Neck:
+          name: svtr
+          dims: 120
+          depth: 2
+          hidden_dims: 120
+          kernel_size:
+          - 1
+          - 3
+          use_guide: true
+        Head:
+          fc_decay: 1.0e-05
+    - NRTRHead:
+        nrtr_dim: 384
+        max_text_length: 25
+Loss:
+  name: MultiLoss
+  loss_config_list:
+  - CTCLoss: null
+  - NRTRLoss: null
+PostProcess:
+  name: CTCLabelDecode
+Metric:
+  name: RecMetric
+  main_indicator: acc
+  ignore_space: false
+Train:
+  dataset:
+    name: MultiScaleDataSet
+    ds_width: false
+    data_dir: ./train_data/
+    ext_op_transform_idx: 1
+    label_file_list:
+    - ./train_data/train_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - RecConAug:
+        prob: 0.5
+        ext_data_num: 2
+        image_shape:
+        - 48
+        - 320
+        - 3
+        max_text_length: 25
+    - RecAug: null
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  sampler:
+    name: MultiScaleSampler
+    scales:
+    - - 320
+      - 32
+    - - 320
+      - 48
+    - - 320
+      - 64
+    first_bs: 96
+    fix_bs: false
+    divided_factor:
+    - 8
+    - 16
+    is_training: true
+  loader:
+    shuffle: true
+    batch_size_per_card: 96
+    drop_last: true
+    num_workers: 8
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: ./train_data
+    label_file_list:
+    - ./train_data/val_list.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - MultiLabelEncode:
+        gtc_encode: NRTRLabelEncode
+    - RecResizeImg:
+        image_shape:
+        - 3
+        - 48
+        - 320
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label_ctc
+        - label_gtc
+        - length
+        - valid_ratio
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 128
+    num_workers: 4
+profiler_options: null
diff --git
a/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml new file mode 100644 index 0000000..1db3e1c --- /dev/null +++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_common_train_v2.0.yml @@ -0,0 +1,100 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_chinese_common_v2.0 + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + character_type: ch + max_text_length: 25 + infer_mode: False + use_space_char: True + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00004 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: ResNet + layers: 34 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 256 + Head: + name: CTCHead + fc_decay: 0.00004 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/val_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml new file mode 100644 index 0000000..dc9d650 --- /dev/null +++ b/configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_chinese_lite_v2.0 + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + character_type: ch + max_text_length: 25 + infer_mode: False + use_space_char: True + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + 
name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data + label_file_list: ["./train_data/val_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/generate_multi_language_configs.py b/configs/rec/multi_language/generate_multi_language_configs.py new file mode 100644 index 0000000..7a65401 --- /dev/null +++ b/configs/rec/multi_language/generate_multi_language_configs.py @@ -0,0 +1,200 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import yaml
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+import os.path
+import logging
+logging.basicConfig(level=logging.INFO)
+
+support_list = {
+    'it': 'italian',
+    'es': 'spanish',
+    'pt': 'portuguese',
+    'ru': 'russian',
+    'ar': 'arabic',
+    'ta': 'tamil',
+    'ug': 'uyghur',
+    'fa': 'persian',
+    'ur': 'urdu',
+    'rs_latin': 'serbian latin',
+    'oc': 'occitan',
+    'rs_cyrillic': 'serbian cyrillic',
+    'bg': 'bulgarian',
+    'uk': 'ukrainian',
+    'be': 'belarusian',
+    'te': 'telugu',
+    'kn': 'kannada',
+    'ch_tra': 'traditional chinese',
+    'hi': 'hindi',
+    'mr': 'marathi',
+    'ne': 'nepali',
+}
+assert (os.path.isfile("./rec_multi_language_lite_train.yml")
+        ), "Missing basic configuration file rec_multi_language_lite_train.yml. \
+You can download it from \
+https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/configs/rec/multi_language/"
+
+global_config = yaml.load(
+    open("./rec_multi_language_lite_train.yml", 'rb'), Loader=yaml.Loader)
+project_path = os.path.abspath(os.path.join(os.getcwd(), "../../../"))
+
+
+class ArgsParser(ArgumentParser):
+    def __init__(self):
+        super(ArgsParser, self).__init__(
+            formatter_class=RawDescriptionHelpFormatter)
+        self.add_argument(
+            "-o", "--opt", nargs='+', help="set configuration options")
+        self.add_argument(
+            "-l",
+            "--language",
+            nargs='+',
+            help="set language type, support {}".format(support_list))
+        self.add_argument(
+            "--train",
+            type=str,
+            help="change the default path of the training label file")
+        self.add_argument(
+            "--val",
+            type=str,
+            help="change the default path of the evaluation label file")
+        self.add_argument(
+            "--dict",
+            type=str,
+            help="change the default path of the character dictionary")
+        self.add_argument(
+            "--data_dir",
+            type=str,
+            help="change the default root path of the dataset")
+
+    def parse_args(self, argv=None):
+        args = super(ArgsParser, self).parse_args(argv)
+        args.opt = self._parse_opt(args.opt)
+        args.language = self._set_language(args.language)
+        return args
+
+    def _parse_opt(self, opts):
+        config = {}
+        if not opts:
+            return config
+        for s in opts:
+            s = s.strip()
+            k, v = s.split('=')
+            config[k] = yaml.load(v, Loader=yaml.Loader)
+        return config
+
+    def _set_language(self, type):
+        assert (type), "please use -l or --language to choose a language type"
+        assert (
+            type[0] in support_list.keys()
+        ), "the language (-l or --language) must be one of the support list: \n{},\nbut got: {}, " \
+            "please check your running command".format(support_list, type)
+        global_config['Global'][
+            'character_dict_path'] = 'ppocr/utils/dict/{}_dict.txt'.format(type[0])
+        global_config['Global'][
+            'save_model_dir'] = './output/rec_{}_lite'.format(type[0])
+        global_config['Train']['dataset'][
+            'label_file_list'] = ["train_data/{}_train.txt".format(type[0])]
+        global_config['Eval']['dataset'][
+            'label_file_list'] = ["train_data/{}_val.txt".format(type[0])]
+        global_config['Global']['character_type'] = type[0]
+        assert (
+            os.path.isfile(
+                os.path.join(project_path, global_config['Global'][
+                    'character_dict_path']))
+        ), "Missing default dictionary file {}_dict.txt. You can download it from \
+https://github.com/PaddlePaddle/PaddleOCR/tree/dygraph/ppocr/utils/dict/".format(type[0])
+        return type[0]
+
+
+def merge_config(config):
+    """
+    Merge config into global config.
+    Args:
+        config (dict): Config to be merged.
+    Returns: global config
+    """
+    for key, value in config.items():
+        if "." not in key:
+            if isinstance(value, dict) and key in global_config:
+                global_config[key].update(value)
+            else:
+                global_config[key] = value
+        else:
+            sub_keys = key.split('.')
+            assert (
+                sub_keys[0] in global_config
+            ), "the sub_keys can only be one of global_config: {}, but got: {}, " \
+                "please check your running command".format(
+                    global_config.keys(), sub_keys[0])
+            cur = global_config[sub_keys[0]]
+            for idx, sub_key in enumerate(sub_keys[1:]):
+                if idx == len(sub_keys) - 2:
+                    cur[sub_key] = value
+                else:
+                    cur = cur[sub_key]
+
+
+def loss_file(path):
+    # Despite its name, this helper only checks that the given file exists.
+    assert (
+        os.path.exists(path)
+    ), "There is no such file: {}. Please make sure the specified file exists.".format(path)
+
+
+if __name__ == '__main__':
+    FLAGS = ArgsParser().parse_args()
+    merge_config(FLAGS.opt)
+    save_file_path = 'rec_{}_lite_train.yml'.format(FLAGS.language)
+    if os.path.isfile(save_file_path):
+        os.remove(save_file_path)
+
+    if FLAGS.train:
+        global_config['Train']['dataset']['label_file_list'] = [FLAGS.train]
+        train_label_path = os.path.join(project_path, FLAGS.train)
+        loss_file(train_label_path)
+    if FLAGS.val:
+        global_config['Eval']['dataset']['label_file_list'] = [FLAGS.val]
+        eval_label_path = os.path.join(project_path, FLAGS.val)
+        loss_file(eval_label_path)
+    if FLAGS.dict:
+        global_config['Global']['character_dict_path'] = FLAGS.dict
+        dict_path = os.path.join(project_path, FLAGS.dict)
+        loss_file(dict_path)
+    if FLAGS.data_dir:
+        global_config['Eval']['dataset']['data_dir'] = FLAGS.data_dir
+        global_config['Train']['dataset']['data_dir'] = FLAGS.data_dir
+        data_dir = os.path.join(project_path, FLAGS.data_dir)
+        loss_file(data_dir)
+
+    with open(save_file_path, 'w') as f:
+        yaml.dump(
+            dict(global_config), f, default_flow_style=False, sort_keys=False)
+    logging.info("Project path is: {}".format(project_path))
+    logging.info("Train list path set to: {}".format(global_config['Train'][
+        'dataset']['label_file_list'][0]))
+    logging.info("Eval list path set to: {}".format(global_config['Eval'][
+        'dataset']['label_file_list'][0]))
+    logging.info("Dataset root path set to: {}".format(global_config['Eval'][
+        'dataset']['data_dir']))
+    logging.info("Dict path set to: {}".format(global_config['Global'][
+        'character_dict_path']))
+    logging.info("Config file set to: configs/rec/multi_language/{}".format(save_file_path))
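The generator above rewrites the base rec_multi_language_lite_train.yml into a language-specific config. A hypothetical invocation (the Italian paths are illustrative, not part of this patch):

    cd configs/rec/multi_language
    python3 generate_multi_language_configs.py -l it \
        --train train_data/it_train.txt \
        --val train_data/it_val.txt \
        --data_dir train_data \
        --dict ppocr/utils/dict/it_dict.txt

Per the script's logic, this writes rec_it_lite_train.yml next to the base config and logs the resolved dataset and dictionary paths.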
diff --git a/configs/rec/multi_language/rec_arabic_lite_train.yml b/configs/rec/multi_language/rec_arabic_lite_train.yml
new file mode 100644
index 0000000..6dcfd1b
--- /dev/null
+++ b/configs/rec/multi_language/rec_arabic_lite_train.yml
@@ -0,0 +1,111 @@
+Global:
+  use_gpu: true
+  epoch_num: 500
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_arabic_lite
+  save_epoch_step: 3
+  eval_batch_step:
+  - 0
+  - 2000
+  cal_metric_during_train: true
+  pretrained_model: null
+  checkpoints: null
+  save_inference_dir: null
+  use_visualdl: false
+  infer_img: null
+  character_dict_path: ppocr/utils/dict/arabic_dict.txt
+  character_type: arabic
+  max_text_length: 25
+  infer_mode: false
+  use_space_char: true
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Cosine
+    learning_rate: 0.001
+  regularizer:
+    name: L2
+    factor: 1.0e-05
+Architecture:
+  model_type: rec
+  algorithm: CRNN
+  Transform: null
+  Backbone:
+    name: MobileNetV3
+    scale: 0.5
+    model_name: small
+    small_stride:
+    - 1
+    - 2
+    - 2
+    - 2
+  Neck:
+    name: SequenceEncoder
+    encoder_type: rnn
+    hidden_size: 48
+  Head:
+    name: CTCHead
+    fc_decay: 1.0e-05
+Loss:
+  name: CTCLoss
+PostProcess:
+  name: CTCLabelDecode
+Metric:
+  name: RecMetric
+  main_indicator: acc
+Train:
+  dataset:
+    name: SimpleDataSet
+    data_dir: train_data/
+    label_file_list:
+    - train_data/arabic_train.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - RecAug: null
+    - CTCLabelEncode: null
+    - RecResizeImg:
+        image_shape:
+        - 3
+        - 32
+        - 320
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label
+        - length
+  loader:
+    shuffle: true
+    batch_size_per_card: 256
+    drop_last: true
+    num_workers: 8
+Eval:
+  dataset:
+    name: SimpleDataSet
+    data_dir: train_data/
+    label_file_list:
+    - train_data/arabic_val.txt
+    transforms:
+    - DecodeImage:
+        img_mode: BGR
+        channel_first: false
+    - CTCLabelEncode: null
+    - RecResizeImg:
+        image_shape:
+        - 3
+        - 32
+        - 320
+    - KeepKeys:
+        keep_keys:
+        - image
+        - label
+        - length
+  loader:
+    shuffle: false
+    drop_last: false
+    batch_size_per_card: 256
+    num_workers: 8
diff --git a/configs/rec/multi_language/rec_cyrillic_lite_train.yml b/configs/rec/multi_language/rec_cyrillic_lite_train.yml
new file mode 100644
index 0000000..52527c1
--- /dev/null
+++ b/configs/rec/multi_language/rec_cyrillic_lite_train.yml
@@ -0,0 +1,111 @@
+Global:
+  use_gpu: true
+  epoch_num: 500
+  log_smooth_window: 20
+  print_batch_step: 10
+  save_model_dir: ./output/rec_cyrillic_lite
+  save_epoch_step: 3
+  eval_batch_step:
+  - 0
+  - 2000
+  cal_metric_during_train: true
+  pretrained_model: null
+  checkpoints: null
+  save_inference_dir: null
+  use_visualdl: false
+  infer_img: null
+  character_dict_path: ppocr/utils/dict/cyrillic_dict.txt
+  character_type: cyrillic
+  max_text_length: 25
+  infer_mode: false
+  use_space_char: true
+Optimizer:
+  name: Adam
+  beta1: 0.9
+  beta2: 0.999
+  lr:
+    name: Cosine
+    learning_rate: 0.001
+  regularizer:
+    name: L2
+    factor: 1.0e-05
+Architecture:
+  model_type: rec
+  algorithm: CRNN
+  Transform: null
+  Backbone:
+    name: MobileNetV3
+    scale: 0.5
+    model_name: small
+    small_stride:
+    - 1
+    - 2
+    - 2
+    - 2
+  Neck:
+    name: SequenceEncoder
+    encoder_type: rnn
+    hidden_size: 48
+  Head:
+    name: CTCHead
+    fc_decay: 1.0e-05
+Loss:
+  name: CTCLoss
+PostProcess:
+  name: CTCLabelDecode
+Metric:
+  name: RecMetric
+  main_indicator: acc
+Train:
+  dataset:
+    name: SimpleDataSet
+    data_dir: train_data/
+    label_file_list:
+    - train_data/cyrillic_train.txt
+    transforms:
+    - 
DecodeImage: + img_mode: BGR + channel_first: false + - RecAug: null + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: true + batch_size_per_card: 256 + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: + - train_data/cyrillic_val.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: false + drop_last: false + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_devanagari_lite_train.yml b/configs/rec/multi_language/rec_devanagari_lite_train.yml new file mode 100644 index 0000000..e1a7c82 --- /dev/null +++ b/configs/rec/multi_language/rec_devanagari_lite_train.yml @@ -0,0 +1,111 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_devanagari_lite + save_epoch_step: 3 + eval_batch_step: + - 0 + - 2000 + cal_metric_during_train: true + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: null + character_dict_path: ppocr/utils/dict/devanagari_dict.txt + character_type: devanagari + max_text_length: 25 + infer_mode: false + use_space_char: true +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: L2 + factor: 1.0e-05 +Architecture: + model_type: rec + algorithm: CRNN + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: + - 1 + - 2 + - 2 + - 2 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 1.0e-05 +Loss: + name: CTCLoss +PostProcess: + name: CTCLabelDecode +Metric: + name: RecMetric + main_indicator: acc +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: + - train_data/devanagari_train.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecAug: null + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: true + batch_size_per_card: 256 + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: + - train_data/devanagari_val.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: false + drop_last: false + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_en_number_lite_train.yml b/configs/rec/multi_language/rec_en_number_lite_train.yml new file mode 100644 index 0000000..13eda84 --- /dev/null +++ b/configs/rec/multi_language/rec_en_number_lite_train.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_en_number_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + 
use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict/en_dict.txt + character_type: EN + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/eval_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_french_lite_train.yml b/configs/rec/multi_language/rec_french_lite_train.yml new file mode 100644 index 0000000..63378d3 --- /dev/null +++ b/configs/rec/multi_language/rec_french_lite_train.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_french_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict/french_dict.txt + character_type: french + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + 
loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/eval_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_german_lite_train.yml b/configs/rec/multi_language/rec_german_lite_train.yml new file mode 100644 index 0000000..1651510 --- /dev/null +++ b/configs/rec/multi_language/rec_german_lite_train.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_german_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict/german_dict.txt + character_type: german + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/eval_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_japan_lite_train.yml b/configs/rec/multi_language/rec_japan_lite_train.yml new file mode 100644 index 0000000..bb47584 --- /dev/null +++ b/configs/rec/multi_language/rec_japan_lite_train.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_japan_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set 
to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict/japan_dict.txt + character_type: japan + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/eval_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_korean_lite_train.yml b/configs/rec/multi_language/rec_korean_lite_train.yml new file mode 100644 index 0000000..77f1552 --- /dev/null +++ b/configs/rec/multi_language/rec_korean_lite_train.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_korean_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict/korean_dict.txt + character_type: korean + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - 
KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/eval_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_latin_lite_train.yml b/configs/rec/multi_language/rec_latin_lite_train.yml new file mode 100644 index 0000000..e71112b --- /dev/null +++ b/configs/rec/multi_language/rec_latin_lite_train.yml @@ -0,0 +1,111 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_latin_lite + save_epoch_step: 3 + eval_batch_step: + - 0 + - 2000 + cal_metric_during_train: true + pretrained_model: null + checkpoints: null + save_inference_dir: null + use_visualdl: false + infer_img: null + character_dict_path: ppocr/utils/dict/latin_dict.txt + character_type: latin + max_text_length: 25 + infer_mode: false + use_space_char: true +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: L2 + factor: 1.0e-05 +Architecture: + model_type: rec + algorithm: CRNN + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: + - 1 + - 2 + - 2 + - 2 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 1.0e-05 +Loss: + name: CTCLoss +PostProcess: + name: CTCLabelDecode +Metric: + name: RecMetric + main_indicator: acc +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: + - train_data/latin_train.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - RecAug: null + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: true + batch_size_per_card: 256 + drop_last: true + num_workers: 8 +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: + - train_data/latin_val.txt + transforms: + - DecodeImage: + img_mode: BGR + channel_first: false + - CTCLabelEncode: null + - RecResizeImg: + image_shape: + - 3 + - 32 + - 320 + - KeepKeys: + keep_keys: + - image + - label + - length + loader: + shuffle: false + drop_last: false + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/multi_language/rec_multi_language_lite_train.yml b/configs/rec/multi_language/rec_multi_language_lite_train.yml new file mode 100644 index 0000000..c42a3d1 --- /dev/null +++ b/configs/rec/multi_language/rec_multi_language_lite_train.yml @@ -0,0 +1,103 @@ +Global: + use_gpu: True + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_multi_language_lite + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + 
character_dict_path: + # Set the language of training, if set, select the default dictionary file + character_type: + max_text_length: 25 + infer_mode: False + use_space_char: True + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ + label_file_list: ["./train_data/val_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/rec_d28_can.yml b/configs/rec/rec_d28_can.yml new file mode 100644 index 0000000..b6aa26b --- /dev/null +++ b/configs/rec/rec_d28_can.yml @@ -0,0 +1,122 @@ +Global: + use_gpu: True + epoch_num: 240 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/can/ + save_epoch_step: 1 + # evaluation is run every 1105 iterations (1 epoch)(batch_size = 8) + eval_batch_step: [0, 1105] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/datasets/crohme_demo/hme_00.jpg + # for data or label process + character_dict_path: ppocr/utils/dict/latex_symbol_dict.txt + max_text_length: 36 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_can.txt + +Optimizer: + name: Momentum + momentum: 0.9 + clip_norm_global: 100.0 + lr: + name: TwoStepCosine + learning_rate: 0.01 + warmup_epoch: 1 + weight_decay: 0.0001 + +Architecture: + model_type: rec + algorithm: CAN + in_channels: 1 + Transform: + Backbone: + name: DenseNet + growthRate: 24 + reduction: 0.5 + bottleneck: True + use_dropout: True + input_channel: 1 + Head: + name: CANHead + in_channel: 684 + out_channel: 111 + max_text_length: 36 + ratio: 16 + attdecoder: + is_train: False + input_size: 256 + hidden_size: 256 + encoder_out_channel: 684 + dropout: True + dropout_ratio: 0.5 + word_num: 111 + counting_decoder_out_channel: 111 + attention: + attention_dim: 512 + word_conv_kernel: 1 + +Loss: + name: CANLoss + +PostProcess: + name: CANLabelDecode + +Metric: + name: CANMetric + main_indicator: exp_rate + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/CROHME/training/images/ + label_file_list: ["./train_data/CROHME/training/labels.txt"] + transforms: + - DecodeImage: + channel_first: False + - NormalizeImage: + mean: [0,0,0] + std: 
[1,1,1] + order: 'hwc' + - GrayImageChannelFormat: + inverse: True + - CANLabelEncode: + lower: False + - KeepKeys: + keep_keys: ['image', 'label'] + loader: + shuffle: True + batch_size_per_card: 8 + drop_last: False + num_workers: 4 + collate_fn: DyMaskCollator + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/CROHME/evaluation/images/ + label_file_list: ["./train_data/CROHME/evaluation/labels.txt"] + transforms: + - DecodeImage: + channel_first: False + - NormalizeImage: + mean: [0,0,0] + std: [1,1,1] + order: 'hwc' + - GrayImageChannelFormat: + inverse: True + - CANLabelEncode: + lower: False + - KeepKeys: + keep_keys: ['image', 'label'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 + num_workers: 4 + collate_fn: DyMaskCollator diff --git a/configs/rec/rec_efficientb3_fpn_pren.yml b/configs/rec/rec_efficientb3_fpn_pren.yml new file mode 100644 index 0000000..0fac6a7 --- /dev/null +++ b/configs/rec/rec_efficientb3_fpn_pren.yml @@ -0,0 +1,92 @@ +Global: + use_gpu: True + epoch_num: 8 + log_smooth_window: 20 + print_batch_step: 5 + save_model_dir: ./output/rec/pren_new + save_epoch_step: 3 + # evaluation is run every 2000 iterations after the 4000th iteration + eval_batch_step: [4000, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + max_text_length: &max_text_length 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_pren.txt + +Optimizer: + name: Adadelta + lr: + name: Piecewise + decay_epochs: [2, 5, 7] + values: [0.5, 0.1, 0.01, 0.001] + +Architecture: + model_type: rec + algorithm: PREN + in_channels: 3 + Backbone: + name: EfficientNetb3_PREN + Neck: + name: PRENFPN + n_r: 5 + d_model: 384 + max_len: *max_text_length + dropout: 0.1 + Head: + name: PRENHead + +Loss: + name: PRENLoss + +PostProcess: + name: PRENLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: + img_mode: BGR + channel_first: False + - PRENLabelEncode: + - RecAug: + - PRENResizeImg: + image_shape: [64, 256] # h,w + - KeepKeys: + keep_keys: ['image', 'label'] + loader: + shuffle: True + batch_size_per_card: 128 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: + img_mode: BGR + channel_first: False + - PRENLabelEncode: + - PRENResizeImg: + image_shape: [64, 256] # h,w + - KeepKeys: + keep_keys: ['image', 'label'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 64 + num_workers: 8 diff --git a/configs/rec/rec_icdar15_train.yml b/configs/rec/rec_icdar15_train.yml new file mode 100644 index 0000000..8a743b5 --- /dev/null +++ b/configs/rec/rec_icdar15_train.yml @@ -0,0 +1,100 @@ +Global: + use_gpu: true + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/ic15/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: ppocr/utils/ic15_dict.txt + character_type: 
ch + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: CTCHead + fc_decay: 0 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + use_shared_memory: False + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ + label_file_list: ["./train_data/train_list.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 + use_shared_memory: False diff --git a/configs/rec/rec_mtb_nrtr.yml b/configs/rec/rec_mtb_nrtr.yml new file mode 100644 index 0000000..765e282 --- /dev/null +++ b/configs/rec/rec_mtb_nrtr.yml @@ -0,0 +1,101 @@ +Global: + use_gpu: True + epoch_num: 21 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/nrtr/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: pytorchocr/utils/EN_symbol_dict.txt + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_nrtr.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.99 + clip_norm: 5.0 + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + regularizer: + name: 'L2' + factor: 0. + +Architecture: + model_type: rec + algorithm: NRTR + in_channels: 1 + Transform: + Backbone: + name: MTB + cnn_num: 2 + Head: + name: Transformer + d_model: 512 + num_encoder_layers: 6 + beam_size: -1 # When Beam size is greater than 0, it means to use beam search when evaluation. 
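+      # with beam_size: -1, beam search is therefore disabled and greedy decoding
+      # is used at evaluation time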
+ + +Loss: + name: NRTRLoss + smoothing: True + +PostProcess: + name: NRTRLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - NRTRLabelEncode: # Class handling label + - NRTRRecResizeImg: + image_shape: [100, 32] + resize_type: PIL # PIL or OpenCV + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - NRTRLabelEncode: # Class handling label + - NRTRRecResizeImg: + image_shape: [100, 32] + resize_type: PIL # PIL or OpenCV + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 1 + use_shared_memory: False diff --git a/configs/rec/rec_mv3_none_bilstm_ctc.yml b/configs/rec/rec_mv3_none_bilstm_ctc.yml new file mode 100644 index 0000000..00c1db8 --- /dev/null +++ b/configs/rec/rec_mv3_none_bilstm_ctc.yml @@ -0,0 +1,96 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/mv3_none_bilstm_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: CTCHead + fc_decay: 0 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 diff --git a/configs/rec/rec_mv3_none_none_ctc.yml b/configs/rec/rec_mv3_none_none_ctc.yml new file mode 100644 index 0000000..6711b1d --- /dev/null +++ 
b/configs/rec/rec_mv3_none_none_ctc.yml @@ -0,0 +1,95 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/mv3_none_none_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: Rosetta + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + fc_decay: 0.0004 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/rec_mv3_tps_bilstm_att.yml b/configs/rec/rec_mv3_tps_bilstm_att.yml new file mode 100644 index 0000000..3cf1f7a --- /dev/null +++ b/configs/rec/rec_mv3_tps_bilstm_att.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_mv3_tps_bilstm_att/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: small + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: AttentionHead + hidden_size: 96 + + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: 
./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 1 diff --git a/configs/rec/rec_mv3_tps_bilstm_ctc.yml b/configs/rec/rec_mv3_tps_bilstm_ctc.yml new file mode 100644 index 0000000..4e86709 --- /dev/null +++ b/configs/rec/rec_mv3_tps_bilstm_ctc.yml @@ -0,0 +1,100 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/mv3_tps_bilstm_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: STARNet + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: small + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: CTCHead + fc_decay: 0.0004 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 diff --git a/configs/rec/rec_r31_sar.yml b/configs/rec/rec_r31_sar.yml new file mode 100644 index 0000000..65e7877 --- /dev/null +++ b/configs/rec/rec_r31_sar.yml @@ -0,0 +1,98 @@ +Global: + use_gpu: true + epoch_num: 5 + log_smooth_window: 20 + print_batch_step: 20 + save_model_dir: ./sar_rec + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + 
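+  # eval_batch_step is [start_iteration, interval]: [0, 2000] evaluates every
+  # 2000 iterations from the start of training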
cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict90.txt + max_text_length: 30 + infer_mode: False + use_space_char: False + rm_symbol: True + save_res_path: ./output/rec/predicts_sar.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Piecewise + decay_epochs: [3, 4] + values: [0.001, 0.0001, 0.00001] + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: SAR + Transform: + Backbone: + name: ResNet31 + Head: + name: SARHead + +Loss: + name: SARLoss + +PostProcess: + name: SARLabelDecode + +Metric: + name: RecMetric + + +Train: + dataset: + name: SimpleDataSet + label_file_list: ['./train_data/train_list.txt'] + data_dir: ./train_data/ + ratio_list: 1.0 + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SARLabelEncode: # Class handling label + - SARRecResizeImg: + image_shape: [3, 48, 48, 160] # h:48 w:[48,160] + width_downsample_ratio: 0.25 + - KeepKeys: + keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 64 + drop_last: True + num_workers: 8 + use_shared_memory: False + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SARLabelEncode: # Class handling label + - SARRecResizeImg: + image_shape: [3, 48, 48, 160] + width_downsample_ratio: 0.25 + - KeepKeys: + keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 64 + num_workers: 4 + use_shared_memory: False + diff --git a/configs/rec/rec_r34_vd_none_bilstm_ctc.yml b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml new file mode 100644 index 0000000..e4d301a --- /dev/null +++ b/configs/rec/rec_r34_vd_none_bilstm_ctc.yml @@ -0,0 +1,95 @@ +Global: + use_gpu: true + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/r34_vd_none_bilstm_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: ResNet + layers: 34 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 256 + Head: + name: CTCHead + fc_decay: 0 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + 
drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 diff --git a/configs/rec/rec_r34_vd_none_none_ctc.yml b/configs/rec/rec_r34_vd_none_none_ctc.yml new file mode 100644 index 0000000..4a17a00 --- /dev/null +++ b/configs/rec/rec_r34_vd_none_none_ctc.yml @@ -0,0 +1,93 @@ +Global: + use_gpu: true + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/r34_vd_none_none_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: Rosetta + Backbone: + name: ResNet + layers: 34 + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + fc_decay: 0.0004 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 diff --git a/configs/rec/rec_r34_vd_tps_bilstm_att.yml b/configs/rec/rec_r34_vd_tps_bilstm_att.yml new file mode 100644 index 0000000..659a172 --- /dev/null +++ b/configs/rec/rec_r34_vd_tps_bilstm_att.yml @@ -0,0 +1,101 @@ +Global: + use_gpu: True + epoch_num: 400 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/b3_rare_r34_none_gru/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + + +Optimizer: + 
name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00000 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: large + Backbone: + name: ResNet + layers: 34 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 256 #96 + Head: + name: AttentionHead # AttentionHead + hidden_size: 256 # + l2_decay: 0.00001 + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml new file mode 100644 index 0000000..7e9d13a --- /dev/null +++ b/configs/rec/rec_r34_vd_tps_bilstm_ctc.yml @@ -0,0 +1,99 @@ +Global: + use_gpu: true + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/r34_vd_tps_bilstm_ctc/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: STARNet + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: large + Backbone: + name: ResNet + layers: 34 + Neck: + name: SequenceEncoder + encoder_type: reshape + hidden_size: 256 + Head: + name: CTCHead + fc_decay: 0 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + 
image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 4 diff --git a/configs/rec/rec_r50_fpn_srn.yml b/configs/rec/rec_r50_fpn_srn.yml new file mode 100644 index 0000000..6b38616 --- /dev/null +++ b/configs/rec/rec_r50_fpn_srn.yml @@ -0,0 +1,107 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 5 + save_model_dir: ./output/rec/srn_new + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 5000] + # if pretrained_model is saved in static mode, load_static_weights must set to True + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + num_heads: 8 + infer_mode: False + use_space_char: False + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + clip_norm: 10.0 + lr: + learning_rate: 0.0001 + +Architecture: + model_type: rec + algorithm: SRN + in_channels: 1 + Transform: + Backbone: + name: ResNetFPN + Head: + name: SRNHead + max_text_length: 25 + num_heads: 8 + num_encoder_TUs: 2 + num_decoder_TUs: 4 + hidden_dims: 512 + +Loss: + name: SRNLoss + +PostProcess: + name: SRNLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SRNLabelEncode: # Class handling label + - SRNRecResizeImg: + image_shape: [1, 64, 256] + - KeepKeys: + keep_keys: ['image', + 'label', + 'length', + 'encoder_word_pos', + 'gsrm_word_pos', + 'gsrm_slf_attn_bias1', + 'gsrm_slf_attn_bias2'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 64 + drop_last: False + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SRNLabelEncode: # Class handling label + - SRNRecResizeImg: + image_shape: [1, 64, 256] + - KeepKeys: + keep_keys: ['image', + 'label', + 'length', + 'encoder_word_pos', + 'gsrm_word_pos', + 'gsrm_slf_attn_bias1', + 'gsrm_slf_attn_bias2'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 32 + num_workers: 4 diff --git a/configs/rec/rec_svtr/rec_svtr_base_8local_10global_stn_en.yml b/configs/rec/rec_svtr/rec_svtr_base_8local_10global_stn_en.yml new file mode 100644 index 0000000..9127838 --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_base_8local_10global_stn_en.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_base_stn_en/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_base.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 
0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.00025 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [48, 160] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [48, 160] + out_char_num: 40 # output char patch + out_channels: 256 # char patch dim + patch_merging: 'Conv' + embed_dim: [128, 256, 384] + depth: [3, 6, 9] + num_heads: [4, 8, 12] + mixer: ['Local','Local','Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: False + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: SVTRLabelDecode # SVTRLabelDecode is used for eval after train, please change to CTCLabelDecode when training + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - SVTRRecResizeImg: # SVTRRecResizeImg is used for eval after train, please change to RecResizeImg when training + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 128 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_ch.yml b/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_ch.yml new file mode 100644 index 0000000..ed393ad --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_ch.yml @@ -0,0 +1,113 @@ +Global: + use_gpu: True + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_large_ch/ + save_epoch_step: 10 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 40 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_svtr_large_ch.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0003 + warmup_epoch: 5 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 320] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none 
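+    # the rectified image produced by STN_ON (tps_outputsize) is the input to the
+    # backbone, so it matches the SVTRNet img_size below: [32, 320]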
+ Backbone: + name: SVTRNet + img_size: [32, 320] + out_char_num: 40 + out_channels: 384 + patch_merging: 'Conv' + embed_dim: [192, 256, 512] + depth: [3, 9, 9] + num_heads: [6, 8, 16] + mixer: ['Local','Local','Local','Local','Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + prenorm: False + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/ch_scene + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 128 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/scene_test + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_en.yml b/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_en.yml new file mode 100644 index 0000000..aac7516 --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_large_10local_11global_stn_en.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_large_en/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_large.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.000125 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [48, 160] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [48, 160] + out_char_num: 40 + out_channels: 384 + patch_merging: 'Conv' + embed_dim: [192, 256, 512] + depth: [3, 9, 9] + num_heads: [6, 8, 16] + mixer: ['Local','Local','Local','Local','Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: SVTRLabelDecode # SVTRLabelDecode is used for eval after train, please change to 
CTCLabelDecode when training + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 128 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - SVTRRecResizeImg: # SVTRRecResizeImg is used for eval after train, please change to RecResizeImg when training + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 128 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_ch.yml b/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_ch.yml new file mode 100644 index 0000000..985f0d3 --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_ch.yml @@ -0,0 +1,114 @@ +Global: + use_gpu: True + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_small_ch/ + save_epoch_step: 10 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 40 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_svtr_small_ch.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0003 + warmup_epoch: 5 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 320] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 320] + out_char_num: 40 + out_channels: 192 + patch_merging: 'Conv' + embed_dim: [96, 192, 256] + depth: [3, 6, 6] + num_heads: [3, 6, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: False + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/ch_scene + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 128 + 
drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/scene_test + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_en.yml b/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_en.yml new file mode 100644 index 0000000..2405d3e --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_small_8local_7global_stn_en.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_small_stn_en/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_small.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 100] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 100] + out_char_num: 25 + out_channels: 192 + patch_merging: 'Conv' + embed_dim: [96, 192, 256] + depth: [3, 6, 6] + num_heads: [3, 6, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: False + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: SVTRLabelDecode # SVTRLabelDecode is used for eval after train, please change to CTCLabelDecode when training + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - SVTRRecResizeImg: # SVTRRecResizeImg is used for eval after train, please change to RecResizeImg when training + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + 
drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_ch.yml b/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_ch.yml new file mode 100644 index 0000000..42fe93b --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_ch.yml @@ -0,0 +1,114 @@ +Global: + use_gpu: True + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_tiny_ch/ + save_epoch_step: 10 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 40 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_svtr_tiny_ch.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0003 + warmup_epoch: 5 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 320] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 320] + out_char_num: 40 + out_channels: 192 + patch_merging: 'Conv' + embed_dim: [64, 128, 256] + depth: [3, 6, 3] + num_heads: [2, 4, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/ch_scene + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 128 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/scene_ch/scene_test + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_en.yml b/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_en.yml new file mode 100644 index 0000000..2a41390 --- /dev/null +++ b/configs/rec/rec_svtr/rec_svtr_tiny_6local_6global_stn_en.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_svtr_tiny_en/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + 
infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_tiny.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 100] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 100] + out_char_num: 25 + out_channels: 192 + patch_merging: 'Conv' + embed_dim: [64, 128, 256] + depth: [3, 6, 3] + num_heads: [2, 4, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_svtrnet.yml b/configs/rec/rec_svtrnet.yml new file mode 100644 index 0000000..233d5e2 --- /dev/null +++ b/configs/rec/rec_svtrnet.yml @@ -0,0 +1,117 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/svtr/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: + character_type: en + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_svtr_tiny.txt + + +Optimizer: + name: AdamW + beta1: 0.9 + beta2: 0.99 + epsilon: 0.00000008 + weight_decay: 0.05 + no_weight_decay_name: norm pos_embed + one_dim_param_no_weight_decay: true + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + +Architecture: + model_type: rec + algorithm: SVTR + Transform: + name: STN_ON + tps_inputsize: [32, 64] + tps_outputsize: [32, 100] + num_control_points: 20 + tps_margins: [0.05,0.05] + stn_activation: none + Backbone: + name: SVTRNet + img_size: [32, 100] + out_char_num: 25 + out_channels: 192 
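# Note: in each of these SVTR configs the STN_ON transform rectifies its
# [32, 64] input to tps_outputsize, which matches the backbone img_size,
# and out_char_num matches Global.max_text_length (25 in the English
# configs, 40 in the Chinese one).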
+ patch_merging: 'Conv' + embed_dim: [64, 128, 256] + depth: [3, 6, 3] + num_heads: [2, 4, 8] + mixer: ['Local','Local','Local','Local','Local','Local','Global','Global','Global','Global','Global','Global'] + local_mixer: [[7, 11], [7, 11], [7, 11]] + last_stage: True + prenorm: false + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/validation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + character_dict_path: + image_shape: [3, 64, 256] + padding: False + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/rec/rec_vitstr_none_ce.yml b/configs/rec/rec_vitstr_none_ce.yml new file mode 100644 index 0000000..ebe304f --- /dev/null +++ b/configs/rec/rec_vitstr_none_ce.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 20 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/vitstr_none_ce/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations after the 0th iteration# + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: ppocr/utils/EN_symbol_dict.txt + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_vitstr.txt + + +Optimizer: + name: Adadelta + epsilon: 1.e-8 + rho: 0.95 + clip_norm: 5.0 + lr: + learning_rate: 1.0 + +Architecture: + model_type: rec + algorithm: ViTSTR + in_channels: 1 + Transform: + Backbone: + name: ViTSTR + scale: tiny + Neck: + name: SequenceEncoder + encoder_type: reshape + Head: + name: CTCHead + +Loss: + name: CELoss + with_all: True + ignore_index: &ignore_index 0 # Must be zero or greater than the number of character classes + +PostProcess: + name: ViTSTRLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/training/ + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ViTSTRLabelEncode: # Class handling label + ignore_index: *ignore_index + - GrayRecResizeImg: + image_shape: [224, 224] # W H + resize_type: PIL # PIL or OpenCV + inter_type: 'Image.BICUBIC' + scale: false + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 48 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: LMDBDataSet + data_dir: ./train_data/data_lmdb_release/evaluation/ + transforms: + - DecodeImage: # load image + img_mode: BGR + 
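
The ViTSTR config above shares one value between the loss and the label encoder through a YAML anchor (&ignore_index) and alias (*ignore_index). A minimal demonstration of how such a pair resolves, assuming PyYAML; PaddleOCR-style tooling loads these files in essentially the same way:

import yaml

snippet = """
Loss:
  ignore_index: &ignore_index 0
Train:
  ViTSTRLabelEncode:
    ignore_index: *ignore_index
"""
cfg = yaml.safe_load(snippet)
assert cfg['Train']['ViTSTRLabelEncode']['ignore_index'] == cfg['Loss']['ignore_index']
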
channel_first: False + - ViTSTRLabelEncode: # Class handling label + ignore_index: *ignore_index + - GrayRecResizeImg: + image_shape: [224, 224] # W H + resize_type: PIL # PIL or OpenCV + inter_type: 'Image.BICUBIC' + scale: false + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 2 diff --git a/configs/sr/sr_telescope.yml b/configs/sr/sr_telescope.yml new file mode 100644 index 0000000..1152596 --- /dev/null +++ b/configs/sr/sr_telescope.yml @@ -0,0 +1,84 @@ +Global: + use_gpu: true + epoch_num: 100 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/sr/sr_telescope/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 1000] + cal_metric_during_train: False + pretrained_model: + checkpoints: + save_inference_dir: ./output/sr/sr_telescope/infer + use_visualdl: False + infer_img: doc/imgs_words_en/word_52.png + # for data or label process + character_dict_path: + max_text_length: 100 + infer_mode: False + use_space_char: False + save_res_path: ./output/sr/predicts_telescope.txt + +Optimizer: + name: Adam + beta1: 0.5 + beta2: 0.999 + clip_norm: 0.25 + lr: + learning_rate: 0.0001 + +Architecture: + model_type: sr + algorithm: Telescope + Transform: + name: TBSRN + STN: True + infer_mode: True + +Loss: + name: TelescopeLoss + confuse_dict_path: ./ppocr/utils/dict/confuse.pkl + + +PostProcess: + name: None + +Metric: + name: SRMetric + main_indicator: all + +Train: + dataset: + name: LMDBDataSetSR + data_dir: ./train_data/TextZoom/train + transforms: + - SRResize: + imgH: 32 + imgW: 128 + down_sample_scale: 2 + - KeepKeys: + keep_keys: ['img_lr', 'img_hr', 'label'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 16 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSetSR + data_dir: ./train_data/TextZoom/test + transforms: + - SRResize: + imgH: 32 + imgW: 128 + down_sample_scale: 2 + - KeepKeys: + keep_keys: ['img_lr', 'img_hr', 'label'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 16 + num_workers: 4 + diff --git a/configs/sr/sr_tsrn_transformer_strock.yml b/configs/sr/sr_tsrn_transformer_strock.yml new file mode 100644 index 0000000..2e50948 --- /dev/null +++ b/configs/sr/sr_tsrn_transformer_strock.yml @@ -0,0 +1,85 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/sr/sr_tsrn_transformer_strock/ + save_epoch_step: 3 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 1000] + cal_metric_during_train: False + pretrained_model: + checkpoints: + save_inference_dir: sr_output + use_visualdl: False + infer_img: doc/imgs_words_en/word_52.png + # for data or label process + character_dict_path: ./train_data/srdata/english_decomposition.txt + max_text_length: 100 + infer_mode: False + use_space_char: False + save_res_path: ./output/sr/predicts_gestalt.txt + +Optimizer: + name: Adam + beta1: 0.5 + beta2: 0.999 + clip_norm: 0.25 + lr: + learning_rate: 0.0001 + +Architecture: + model_type: sr + algorithm: Gestalt + Transform: + name: TSRN + STN: True + infer_mode: True + +Loss: + name: StrokeFocusLoss + character_dict_path: ./train_data/srdata/english_decomposition.txt + +PostProcess: + name: None + +Metric: + name: SRMetric + main_indicator: all + +Train: + dataset: + name: LMDBDataSetSR + 
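
The LMDBDataSetSR pipelines above train on paired low/high-resolution text crops (TextZoom). Roughly what an SRResize-style step with down_sample_scale: 2 produces, as a sketch under assumed naming and interpolation, not the patch's own implementation:

import cv2
import numpy as np

def sr_resize(img, imgH=32, imgW=128, down_sample_scale=2):
    # high-resolution target at (imgH, imgW), low-resolution input at 1/scale
    img_hr = cv2.resize(img, (imgW, imgH), interpolation=cv2.INTER_CUBIC)
    img_lr = cv2.resize(img, (imgW // down_sample_scale, imgH // down_sample_scale),
                        interpolation=cv2.INTER_CUBIC)
    return img_lr, img_hr

lr, hr = sr_resize(np.zeros((48, 160, 3), dtype=np.uint8))
assert lr.shape == (16, 64, 3) and hr.shape == (32, 128, 3)
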
data_dir: ./train_data/srdata/train + transforms: + - SRResize: + imgH: 32 + imgW: 128 + down_sample_scale: 2 + - SRLabelEncode: # Class handling label + - KeepKeys: + keep_keys: ['img_lr', 'img_hr', 'length', 'input_tensor', 'label'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 16 + drop_last: True + num_workers: 4 + +Eval: + dataset: + name: LMDBDataSetSR + data_dir: ./train_data/srdata/test + transforms: + - SRResize: + imgH: 32 + imgW: 128 + down_sample_scale: 2 + - SRLabelEncode: # Class handling label + - KeepKeys: + keep_keys: ['img_lr', 'img_hr','length', 'input_tensor', 'label'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 16 + num_workers: 4 + diff --git a/configs/table/table_mv3.yml b/configs/table/table_mv3.yml new file mode 100644 index 0000000..a74e18d --- /dev/null +++ b/configs/table/table_mv3.yml @@ -0,0 +1,116 @@ +Global: + use_gpu: true + epoch_num: 50 + log_smooth_window: 20 + print_batch_step: 5 + save_model_dir: ./output/table_mv3/ + save_epoch_step: 5 + # evaluation is run every 400 iterations after the 0th iteration + eval_batch_step: [0, 400] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/dict/table_structure_dict.txt + character_type: en + max_text_length: 100 + max_elem_length: 500 + max_cell_num: 500 + infer_mode: False + process_total_num: 0 + process_cut_num: 0 + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + clip_norm: 5.0 + lr: + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00000 + +Architecture: + model_type: table + algorithm: TableAttn + Backbone: + name: MobileNetV3 + scale: 1.0 + model_name: small + disable_se: True + Head: + name: TableAttentionHead + hidden_size: 256 + l2_decay: 0.00001 + loc_type: 2 + +Loss: + name: TableAttentionLoss + structure_weight: 100.0 + loc_weight: 10000.0 + +PostProcess: + name: TableLabelDecode + +Metric: + name: TableMetric + main_indicator: acc + +Train: + dataset: + name: PubTabDataSet + data_dir: train_data/table/pubtabnet/train/ + label_file_path: train_data/table/pubtabnet/PubTabNet_2.0.0_train.jsonl + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ResizeTableImage: + max_len: 488 + - TableLabelEncode: + - NormalizeImage: + scale: 1./255. + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - PaddingTableImage: + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'structure', 'bbox_list', 'sp_tokens', 'bbox_list_mask'] + loader: + shuffle: True + batch_size_per_card: 32 + drop_last: True + num_workers: 1 + +Eval: + dataset: + name: PubTabDataSet + data_dir: train_data/table/pubtabnet/val/ + label_file_path: train_data/table/pubtabnet/PubTabNet_2.0.0_val.jsonl + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - ResizeTableImage: + max_len: 488 + - TableLabelEncode: + - NormalizeImage: + scale: 1./255. 
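# Note: scale is deliberately the string '1./255.'; NormalizeImage (see
# modules/pytorchocr/data/imaug/operators.py later in this patch) eval()s
# string scales, so this resolves to 1/255 at runtime.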
+ mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] + order: 'hwc' + - PaddingTableImage: + - ToCHWImage: + - KeepKeys: + keep_keys: ['image', 'structure', 'bbox_list', 'sp_tokens', 'bbox_list_mask'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 16 + num_workers: 1 diff --git a/configs/table/table_mv3_det.yml b/configs/table/table_mv3_det.yml new file mode 100644 index 0000000..f9bb4ba --- /dev/null +++ b/configs/table/table_mv3_det.yml @@ -0,0 +1,15 @@ +Architecture: + model_type: det + algorithm: DB + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + disable_se: true + Neck: + name: DBFPN + out_channels: 96 + Head: + name: DBHead + k: 50 \ No newline at end of file diff --git a/configs/table/table_mv3_rec.yml b/configs/table/table_mv3_rec.yml new file mode 100644 index 0000000..8510ca1 --- /dev/null +++ b/configs/table/table_mv3_rec.yml @@ -0,0 +1,14 @@ +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: CTCHead + fc_decay: 0 diff --git a/configs/table/table_mv3_table_structure.yml b/configs/table/table_mv3_table_structure.yml new file mode 100644 index 0000000..6398e25 --- /dev/null +++ b/configs/table/table_mv3_table_structure.yml @@ -0,0 +1,15 @@ +Architecture: + model_type: table + algorithm: TableAttn + Backbone: + name: MobileNetV3 + scale: 1.0 + model_name: large + disable_se: True + Head: + name: TableAttentionHead + hidden_size: 256 + l2_decay: 0.00001 + loc_type: 2 + max_elem_length: 800 + in_max_len: 512 diff --git a/modules/batch_text_detector.py b/modules/batch_text_detector.py new file mode 100644 index 0000000..2e7088f --- /dev/null +++ b/modules/batch_text_detector.py @@ -0,0 +1,363 @@ +import os +import sys +import copy +import cv2 +import numpy as np +import time +import json +import torch +from .pytorchocr.base_ocr_v20 import BaseOCRV20 +from .pytorchocr.utils.utility import get_image_file_list, check_and_read_gif +from .pytorchocr.data import create_operators, transform +from .pytorchocr.postprocess import build_post_process +import pytorchocr.pytorchocr_utility as utility + +class TextDetector(BaseOCRV20): + def __init__(self, args, **kwargs): + self.args = args + self.det_algorithm = args.det_algorithm + pre_process_list = [{ + 'DetResizeForTest': { + 'limit_side_len': args.det_limit_side_len, + 'limit_type': args.det_limit_type, + } + }, { + 'NormalizeImage': { + 'std': [0.229, 0.224, 0.225], + 'mean': [0.485, 0.456, 0.406], + 'scale': '1./255.', + 'order': 'hwc' + } + }, { + 'ToCHWImage': None + }, { + 'KeepKeys': { + 'keep_keys': ['image', 'shape'] + } + }] + postprocess_params = {} + if self.det_algorithm == "DB": + postprocess_params['name'] = 'DBPostProcess' + postprocess_params["thresh"] = args.det_db_thresh + postprocess_params["box_thresh"] = args.det_db_box_thresh + postprocess_params["max_candidates"] = 1000 + postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio + postprocess_params["use_dilation"] = args.use_dilation + postprocess_params["score_mode"] = args.det_db_score_mode + elif self.det_algorithm == "DB++": + postprocess_params['name'] = 'DBPostProcess' + postprocess_params["thresh"] = args.det_db_thresh + postprocess_params["box_thresh"] = args.det_db_box_thresh + postprocess_params["max_candidates"] = 1000 + postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio + postprocess_params["use_dilation"] = args.use_dilation + 
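
For DB and DB++, det_db_thresh binarizes the predicted probability map, det_db_box_thresh filters candidate boxes by score, and det_db_unclip_ratio controls how far each shrunk text kernel is offset back out to a full-size box. The conventional unclip step looks roughly like this (a sketch with shapely and pyclipper, which DB post-processing commonly uses):

import numpy as np
import pyclipper
from shapely.geometry import Polygon

def unclip(box, unclip_ratio=1.5):
    # offset the shrunk polygon outward by area * ratio / perimeter
    poly = Polygon(box)
    distance = poly.area * unclip_ratio / poly.length
    offset = pyclipper.PyclipperOffset()
    offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
    return np.array(offset.Execute(distance))
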
postprocess_params["score_mode"] = args.det_db_score_mode + pre_process_list[1] = { + 'NormalizeImage': { + 'std': [1.0, 1.0, 1.0], + 'mean': + [0.48109378172549, 0.45752457890196, 0.40787054090196], + 'scale': '1./255.', + 'order': 'hwc' + } + } + elif self.det_algorithm == "EAST": + postprocess_params['name'] = 'EASTPostProcess' + postprocess_params["score_thresh"] = args.det_east_score_thresh + postprocess_params["cover_thresh"] = args.det_east_cover_thresh + postprocess_params["nms_thresh"] = args.det_east_nms_thresh + elif self.det_algorithm == "SAST": + pre_process_list[0] = { + 'DetResizeForTest': { + 'resize_long': args.det_limit_side_len + } + } + postprocess_params['name'] = 'SASTPostProcess' + postprocess_params["score_thresh"] = args.det_sast_score_thresh + postprocess_params["nms_thresh"] = args.det_sast_nms_thresh + self.det_sast_polygon = args.det_sast_polygon + if self.det_sast_polygon: + postprocess_params["sample_pts_num"] = 6 + postprocess_params["expand_scale"] = 1.2 + postprocess_params["shrink_ratio_of_width"] = 0.2 + else: + postprocess_params["sample_pts_num"] = 2 + postprocess_params["expand_scale"] = 1.0 + postprocess_params["shrink_ratio_of_width"] = 0.3 + elif self.det_algorithm == "PSE": + postprocess_params['name'] = 'PSEPostProcess' + postprocess_params["thresh"] = args.det_pse_thresh + postprocess_params["box_thresh"] = args.det_pse_box_thresh + postprocess_params["min_area"] = args.det_pse_min_area + postprocess_params["box_type"] = args.det_pse_box_type + postprocess_params["scale"] = args.det_pse_scale + self.det_pse_box_type = args.det_pse_box_type + elif self.det_algorithm == "FCE": + pre_process_list[0] = { + 'DetResizeForTest': { + 'rescale_img': [1080, 736] + } + } + postprocess_params['name'] = 'FCEPostProcess' + postprocess_params["scales"] = args.scales + postprocess_params["alpha"] = args.alpha + postprocess_params["beta"] = args.beta + postprocess_params["fourier_degree"] = args.fourier_degree + postprocess_params["box_type"] = args.det_fce_box_type + else: + print("unknown det_algorithm:{}".format(self.det_algorithm)) + sys.exit(0) + + self.preprocess_op = create_operators(pre_process_list) + self.postprocess_op = build_post_process(postprocess_params) + + use_gpu = args.use_gpu + self.use_gpu = torch.cuda.is_available() and use_gpu + + self.weights_path = args.det_model_path + self.yaml_path = args.det_yaml_path + network_config = utility.AnalysisConfig(self.weights_path, self.yaml_path) + super(TextDetector, self).__init__(network_config, **kwargs) + self.load_pytorch_weights(self.weights_path) + self.net.eval() + if self.use_gpu: + self.net.cuda() + + def order_points_clockwise(self, pts): + """ + reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py + # sort the points based on their x-coordinates + """ + xSorted = pts[np.argsort(pts[:, 0]), :] + + # grab the left-most and right-most points from the sorted + # x-roodinate points + leftMost = xSorted[:2, :] + rightMost = xSorted[2:, :] + + # now, sort the left-most coordinates according to their + # y-coordinates so we can grab the top-left and bottom-left + # points, respectively + leftMost = leftMost[np.argsort(leftMost[:, 1]), :] + (tl, bl) = leftMost + + rightMost = rightMost[np.argsort(rightMost[:, 1]), :] + (tr, br) = rightMost + + rect = np.array([tl, tr, br, bl], dtype="float32") + return rect + + def clip_det_res(self, points, img_height, img_width): + for pno in range(points.shape[0]): + points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 
1)) + points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1)) + return points + + def filter_tag_det_res(self, dt_boxes, image_shape): + img_height, img_width = image_shape[0:2] + dt_boxes_new = [] + for box in dt_boxes: + box = self.order_points_clockwise(box) + box = self.clip_det_res(box, img_height, img_width) + rect_width = int(np.linalg.norm(box[0] - box[1])) + rect_height = int(np.linalg.norm(box[0] - box[3])) + if rect_width <= 3 or rect_height <= 3: + continue + dt_boxes_new.append(box) + dt_boxes = np.array(dt_boxes_new) + return dt_boxes + + def filter_tag_det_res_only_clip(self, dt_boxes, image_shape): + img_height, img_width = image_shape[0:2] + dt_boxes_new = [] + for box in dt_boxes: + box = self.clip_det_res(box, img_height, img_width) + dt_boxes_new.append(box) + dt_boxes = np.array(dt_boxes_new) + return dt_boxes + + def prepare_image(self, img): + data = {'image': img} + data = transform(data, self.preprocess_op) + img, shape_list = data + return data + def preprocess(self,img): + + img, shape_list = self.prepare_image(img) + if img is None: + return None, 0 + img = np.expand_dims(img, axis=0) + shape_list = np.expand_dims(shape_list, axis=0) + inp = torch.from_numpy(img) + if self.use_gpu: + inp = inp.cuda() + return inp, shape_list + + def postprocess(self, outputs,shape_list,ori_shape): + preds = {} + if self.det_algorithm == "EAST": + preds['f_geo'] = outputs['f_geo'].cpu().numpy() + preds['f_score'] = outputs['f_score'].cpu().numpy() + elif self.det_algorithm == 'SAST': + preds['f_border'] = outputs['f_border'].cpu().numpy() + preds['f_score'] = outputs['f_score'].cpu().numpy() + preds['f_tco'] = outputs['f_tco'].cpu().numpy() + preds['f_tvo'] = outputs['f_tvo'].cpu().numpy() + elif self.det_algorithm in ['DB', 'PSE', 'DB++']: + preds['maps'] = outputs['maps'].cpu().numpy() + elif self.det_algorithm == 'FCE': + for i, (k, output) in enumerate(outputs.items()): + preds['level_{}'.format(i)] = output + else: + raise NotImplementedError + + post_result = self.postprocess_op(preds, shape_list) + dt_boxes = post_result[0]['points'] + if (self.det_algorithm == "SAST" and + self.det_sast_polygon) or (self.det_algorithm in ["PSE", "FCE"] and + self.postprocess_op.box_type == 'poly'): + dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_shape) + else: + dt_boxes = self.filter_tag_det_res(dt_boxes, ori_shape) + return dt_boxes + + def __call__(self, img): + ori_shape = img.shape + inp,shape_list = self.preprocess(img) + starttime = time.time() + with torch.no_grad(): + outputs = self.net(inp) + dt_boxes = self.postprocess(outputs,shape_list,ori_shape) + elapse = time.time() - starttime + return dt_boxes, elapse + + +fast_config = {'use_gpu': True, + 'gpu_mem': 500, + 'warmup': False, + 'image_dir': './doc/imgs/1.jpg', + 'det_algorithm': 'DB', + 'det_model_path': 'weights/en_ptocr_v3_det_infer.pth', + 'det_limit_side_len': 960, + 'det_limit_type': 'max', + 'det_db_thresh': 0.3, + 'det_db_box_thresh': 0.6, + 'det_db_unclip_ratio': 1.5, + 'max_batch_size': 10, + 'use_dilation': False, + 'det_db_score_mode': 'fast', + 'det_east_score_thresh': 0.8, + 'det_east_cover_thresh': 0.1, + 'det_east_nms_thresh': 0.2, + 'det_sast_score_thresh': 0.5, + 'det_sast_nms_thresh': 0.2, + 'det_sast_polygon': False, + 'det_pse_thresh': 0, + 'det_pse_box_thresh': 0.85, + 'det_pse_min_area': 16, + 'det_pse_box_type': 'box', + 'det_pse_scale': 1, + 'scales': [8, 16, 32], + 'alpha': 1.0, + 'beta': 1.0, + 'fourier_degree': 5, + 'det_fce_box_type': 'poly', + 'rec_algorithm': 'CRNN', + 
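
Typical use of the detector defined above (an illustrative sketch; `image` stands for a BGR numpy array, and the weight files named in fast_config are assumed to be present):

detector = BatchTextDetector()        # defined at the end of this module
dt_boxes, elapse = detector(image)    # single-image path
boxes_per_img = detector.batch_process(images, [im.shape for im in images])
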
'rec_model_path': 'weights/en_ptocr_v4_rec_infer.pth', + 'rec_image_inverse': True, + 'rec_image_shape': '3, 32, 320', + 'rec_char_type': 'ch', + 'rec_batch_num': 6, + 'max_text_length': 25, + 'use_space_char': True, + 'drop_score': 0.5, + 'limited_max_width': 1280, + 'limited_min_width': 16, + 'vis_font_path': '/mnt/data/zhangtianning/projects/doc/fonts/simfang.ttf', + 'rec_char_dict_path': './pytorchocr/utils/en_dict.txt', + 'use_angle_cls': False, + 'cls_model_path': None, + 'cls_image_shape': '3, 48, 192', + 'label_list': ['0', '180'], + 'cls_batch_num': 6, + 'cls_thresh': 0.9, + 'enable_mkldnn': False, + 'use_pdserving': False, + 'e2e_algorithm': 'PGNet', + 'e2e_model_path': None, + 'e2e_limit_side_len': 768, + 'e2e_limit_type': 'max', + 'e2e_pgnet_score_thresh': 0.5, + 'e2e_char_dict_path': '/mnt/data/zhangtianning/projects/pytorchocr/utils/ic15_dict.txt', + 'e2e_pgnet_valid_set': 'totaltext', + 'e2e_pgnet_polygon': True, + 'e2e_pgnet_mode': 'fast', + 'sr_model_path': None, + 'sr_image_shape': '3, 32, 128', + 'sr_batch_num': 1, + 'det_yaml_path': 'configs/det/det_ppocr_v3.yml', + 'rec_yaml_path': './configs/rec/PP-OCRv4/en_PP-OCRv4_rec.yml', + 'cls_yaml_path': None, + 'e2e_yaml_path': None, + 'sr_yaml_path': None, + 'use_mp': False, + 'total_process_num': 1, + 'process_id': 0, + 'benchmark': False, + 'save_log_path': './log_output/', + 'show_log': True} +from argparse import Namespace +class BatchTextDetector(TextDetector): + def __init__(self, **kwargs): + args = Namespace(**fast_config) + super().__init__(args, **kwargs) + + def batch_process(self, img_batch, ori_shape_list): + _input_image_batch = [] + shape_list_batch = [] + for img in img_batch: + _input_image, shape_list = self.preprocess(img) + _input_image_batch.append(_input_image) + shape_list_batch.append(shape_list) + _input_image_batch = torch.cat(_input_image_batch) + shape_list_batch = np.stack(shape_list_batch) + with torch.no_grad(): + dt_boxaes_batch = self.net(_input_image_batch) + pred_batch = self.discard_batch(dt_boxaes_batch) + dt_boxes_list=self.batch_postprocess(pred_batch, shape_list_batch,ori_shape_list) + return dt_boxes_list + + def discard_batch(self, outputs): + preds = {} + if self.det_algorithm == "EAST": + raise NotImplementedError + preds['f_geo'] = outputs['f_geo'].cpu().numpy() + preds['f_score'] = outputs['f_score'].cpu().numpy() + elif self.det_algorithm == 'SAST': + raise NotImplementedError + preds['f_border'] = outputs['f_border'].cpu().numpy() + preds['f_score'] = outputs['f_score'].cpu().numpy() + preds['f_tco'] = outputs['f_tco'].cpu().numpy() + preds['f_tvo'] = outputs['f_tvo'].cpu().numpy() + elif self.det_algorithm in ['DB', 'PSE', 'DB++']: + preds = [{'maps':outputs['maps'][j:j+1]} for j in range(len(outputs['maps']))] + elif self.det_algorithm == 'FCE': + for i, (k, output) in enumerate(outputs.items()): + preds['level_{}'.format(i)] = output + else: + raise NotImplementedError + return preds + + def batch_postprocess(self, preds_list, shape_list_list,ori_shape_list): + dt_boxes_list=[] + for preds, shape_list,ori_shape in zip(preds_list, shape_list_list,ori_shape_list): + post_result = self.postprocess_op(preds, shape_list) + dt_boxes = post_result[0]['points'] + if (self.det_algorithm == "SAST" and + self.det_sast_polygon) or (self.det_algorithm in ["PSE", "FCE"] and + self.postprocess_op.box_type == 'poly'): + dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_shape) + else: + dt_boxes = self.filter_tag_det_res(dt_boxes, ori_shape) + dt_boxes_list.append(dt_boxes) + return 
dt_boxes_list \ No newline at end of file diff --git a/modules/pytorchocr/__init__.py b/modules/pytorchocr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/modules/pytorchocr/base_ocr_v20.py b/modules/pytorchocr/base_ocr_v20.py new file mode 100644 index 0000000..3c7eb1a --- /dev/null +++ b/modules/pytorchocr/base_ocr_v20.py @@ -0,0 +1,112 @@ +import os, sys +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from collections import OrderedDict +import numpy as np +import cv2 +import torch + +from pytorchocr.modeling.architectures.base_model import BaseModel + +class BaseOCRV20: + def __init__(self, config, **kwargs): + self.config = config + self.build_net(**kwargs) + self.net.eval() + + + def build_net(self, **kwargs): + self.net = BaseModel(self.config, **kwargs) + + + def load_paddle_weights(self, weights_path): + raise NotImplementedError('implemented in converter.') + print('paddle weights loading...') + import paddle.fluid as fluid + with fluid.dygraph.guard(): + para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path) + + for k,v in self.net.state_dict().items(): + name = k + + if name.endswith('num_batches_tracked'): + continue + + if name.endswith('running_mean'): + ppname = name.replace('running_mean', '_mean') + elif name.endswith('running_var'): + ppname = name.replace('running_var', '_variance') + elif name.endswith('bias') or name.endswith('weight'): + ppname = name + elif 'lstm' in name: + ppname = name + + else: + print('Redundance:') + print(name) + raise ValueError + try: + if ppname.endswith('fc.weight'): + self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname].T)) + else: + self.net.state_dict()[k].copy_(torch.Tensor(para_state_dict[ppname])) + except Exception as e: + print('pytorch: {}, {}'.format(k, v.size())) + print('paddle: {}, {}'.format(ppname, para_state_dict[ppname].shape)) + raise e + + print('model is loaded: {}'.format(weights_path)) + + def read_pytorch_weights(self, weights_path): + if not os.path.exists(weights_path): + raise FileNotFoundError('{} is not existed.'.format(weights_path)) + weights = torch.load(weights_path) + return weights + + def get_out_channels(self, weights): + if list(weights.keys())[-1].endswith('.weight') and len(list(weights.values())[-1].shape) == 2: + out_channels = list(weights.values())[-1].numpy().shape[1] + else: + out_channels = list(weights.values())[-1].numpy().shape[0] + return out_channels + + def load_state_dict(self, weights): + self.net.load_state_dict(weights) + print('weights is loaded.') + + def load_pytorch_weights(self, weights_path): + self.net.load_state_dict(torch.load(weights_path)) + print('model is loaded: {}'.format(weights_path)) + + + def save_pytorch_weights(self, weights_path): + try: + torch.save(self.net.state_dict(), weights_path, _use_new_zipfile_serialization=False) + except: + torch.save(self.net.state_dict(), weights_path) # _use_new_zipfile_serialization=False for torch>=1.6.0 + print('model is saved: {}'.format(weights_path)) + + + def print_pytorch_state_dict(self): + print('pytorch:') + for k,v in self.net.state_dict().items(): + print('{}----{}'.format(k,type(v))) + + def read_paddle_weights(self, weights_path): + import paddle.fluid as fluid + with fluid.dygraph.guard(): + para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path) + return para_state_dict, opti_state_dict + + def print_paddle_state_dict(self, weights_path): + import paddle.fluid as fluid + with fluid.dygraph.guard(): + para_state_dict, 
opti_state_dict = fluid.load_dygraph(weights_path)
+            print('paddle:')
+            for k,v in para_state_dict.items():
+                print('{}----{}'.format(k,type(v)))
+
+
+    def inference(self, inputs):
+        with torch.no_grad():
+            infer = self.net(inputs)
+        return infer
diff --git a/modules/pytorchocr/data/__init__.py b/modules/pytorchocr/data/__init__.py
new file mode 100644
index 0000000..4bc5c34
--- /dev/null
+++ b/modules/pytorchocr/data/__init__.py
@@ -0,0 +1,24 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
+import numpy as np
+# import paddle
+import signal
+import random
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
+
+
+import copy
+# from paddle.io import Dataset, DataLoader, BatchSampler, DistributedBatchSampler
+# import paddle.distributed as dist
+
+from pytorchocr.data.imaug import transform, create_operators
+# from pytorchocr.data.simple_dataset import SimpleDataSet
+# from pytorchocr.data.lmdb_dataset import LMDBDateSet
+
diff --git a/modules/pytorchocr/data/imaug/__init__.py b/modules/pytorchocr/data/imaug/__init__.py
new file mode 100644
index 0000000..dce5011
--- /dev/null
+++ b/modules/pytorchocr/data/imaug/__init__.py
@@ -0,0 +1,48 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+# from .iaa_augment import IaaAugment
+# from .make_border_map import MakeBorderMap
+# from .make_shrink_map import MakeShrinkMap
+# from .random_crop_data import EastRandomCropData, PSERandomCrop
+
+# from .rec_img_aug import RecAug, RecResizeImg, ClsResizeImg
+# from .randaugment import RandAugment
+from .operators import *
+# from .label_ops import *
+
+# from .east_process import *
+# from .sast_process import *
+from .gen_table_mask import *
+
+def transform(data, ops=None):
+    """ transform """
+    if ops is None:
+        ops = []
+    for op in ops:
+        data = op(data)
+        if data is None:
+            return None
+    return data
+
+
+def create_operators(op_param_list, global_config=None):
+    """
+    create operators based on the config
+    Args:
+        params(list): a dict list, used to create some operators
+    """
+    assert isinstance(op_param_list, list), ('operator config should be a list')
+    ops = []
+    for operator in op_param_list:
+        assert isinstance(operator,
+                          dict) and len(operator) == 1, "yaml format error"
+        op_name = list(operator)[0]
+        param = {} if operator[op_name] is None else operator[op_name]
+        if global_config is not None:
+            param.update(global_config)
+        op = eval(op_name)(**param)
+        ops.append(op)
+    return ops
\ No newline at end of file
diff --git a/modules/pytorchocr/data/imaug/gen_table_mask.py b/modules/pytorchocr/data/imaug/gen_table_mask.py
new file mode 100644
index 0000000..2b14e7a
--- /dev/null
+++ b/modules/pytorchocr/data/imaug/gen_table_mask.py
@@ -0,0 +1,245 @@
+"""
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""

+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+import six
+import cv2
+import numpy as np
+
+
+class GenTableMask(object):
+    """ gen table mask """
+
+    def __init__(self, shrink_h_max, shrink_w_max, mask_type=0, **kwargs):
+        self.shrink_h_max = shrink_h_max
+        self.shrink_w_max = shrink_w_max
+        self.mask_type = mask_type
+
+    def projection(self, erosion, h, w, spilt_threshold=0):
+        # horizontal projection
+        projection_map = np.ones_like(erosion)
+        project_val_array = [0 for _ in range(0, h)]
+
+        for j in range(0, h):
+            for i in range(0, w):
+                if erosion[j, i] == 255:
+                    project_val_array[j] += 1
+        # derive the split points from the projection array
+        start_idx = 0  # index where a text region starts
+        end_idx = 0  # index where a blank region starts
+        in_text = False  # whether we are currently inside a text region
+        box_list = []
+        for i in range(len(project_val_array)):
+            if in_text == False and project_val_array[i] > spilt_threshold:  # entered a text region
+                in_text = True
+                start_idx = i
+            elif project_val_array[i] <= spilt_threshold and in_text == True:  # entered a blank region
+                end_idx = i
+                in_text = False
+                if end_idx - start_idx <= 2:
+                    continue
+                box_list.append((start_idx, end_idx + 1))
+
+        if in_text:
+            box_list.append((start_idx, h - 1))
+        # draw the projection histogram
+        for j in range(0, h):
+            for i in range(0, project_val_array[j]):
+                projection_map[j, i] = 0
+        return box_list, projection_map
+
+    def projection_cx(self, box_img):
+        box_gray_img = cv2.cvtColor(box_img, cv2.COLOR_BGR2GRAY)
+        h, w = box_gray_img.shape
+        # binarize the grayscale image
+        ret, thresh1 = cv2.threshold(box_gray_img, 200, 255, cv2.THRESH_BINARY_INV)
+        # vertical erosion
+        if h < w:
+            kernel = np.ones((2, 1), np.uint8)
+            erode = cv2.erode(thresh1, kernel, iterations=1)
+        else:
+            erode = thresh1
+        # horizontal dilation
+        kernel = np.ones((1, 5), np.uint8)
+        erosion = cv2.dilate(erode, kernel, iterations=1)
+        # horizontal projection
+        projection_map = np.ones_like(erosion)
+        project_val_array = [0 for _ in range(0, h)]
+
+        for j in range(0, h):
+            for i in range(0, w):
+                if erosion[j, i] == 255:
+                    project_val_array[j] += 1
+        # derive the split points from the projection array
+        start_idx = 0  # index where a text region starts
+        end_idx = 0  # index where a blank region starts
+        in_text = False  # whether we are currently inside a text region
+        box_list = []
+        spilt_threshold = 0
+        for i in range(len(project_val_array)):
+            if in_text == False and project_val_array[i] > spilt_threshold:  # entered a text region
+                in_text = True
+                start_idx = i
+            elif project_val_array[i] <= spilt_threshold and in_text == True:  # entered a blank region
+                end_idx = i
+                in_text = False
+                if end_idx - start_idx <= 2:
+                    continue
+                box_list.append((start_idx, end_idx + 1))
+
+        if in_text:
+            box_list.append((start_idx, h - 1))
+        # draw the projection histogram
+        for j in range(0, h):
+            for i in range(0, project_val_array[j]):
+                projection_map[j, i] = 0
+        split_bbox_list = []
+        if len(box_list) > 1:
+            for i, (h_start, h_end) in enumerate(box_list):
+                if i == 0:
+                    h_start = 0
+                if i == len(box_list) - 1:
+                    h_end = h
+                word_img = erosion[h_start:h_end + 1, :]
+                word_h, word_w = word_img.shape
+                w_split_list, w_projection_map = self.projection(word_img.T, word_w, word_h)
+                w_start, w_end = w_split_list[0][0], w_split_list[-1][1]
+                if h_start > 0:
+                    h_start -= 1
+                h_end += 1
+                word_img = box_img[h_start:h_end + 1:, w_start:w_end + 1, :]
+                split_bbox_list.append([w_start, h_start, w_end, h_end])
+        else:
+            split_bbox_list.append([0, 0, w, h])
+        return split_bbox_list
+
+    def shrink_bbox(self, bbox):
+        left, top, right, bottom = bbox
+        sh_h = min(max(int((bottom - top) * 0.1), 1), self.shrink_h_max)
+        sh_w = min(max(int((right - left) * 0.1), 1),
self.shrink_w_max) + left_new = left + sh_w + right_new = right - sh_w + top_new = top + sh_h + bottom_new = bottom - sh_h + if left_new >= right_new: + left_new = left + right_new = right + if top_new >= bottom_new: + top_new = top + bottom_new = bottom + return [left_new, top_new, right_new, bottom_new] + + def __call__(self, data): + img = data['image'] + cells = data['cells'] + height, width = img.shape[0:2] + if self.mask_type == 1: + mask_img = np.zeros((height, width), dtype=np.float32) + else: + mask_img = np.zeros((height, width, 3), dtype=np.float32) + cell_num = len(cells) + for cno in range(cell_num): + if "bbox" in cells[cno]: + bbox = cells[cno]['bbox'] + left, top, right, bottom = bbox + box_img = img[top:bottom, left:right, :].copy() + split_bbox_list = self.projection_cx(box_img) + for sno in range(len(split_bbox_list)): + split_bbox_list[sno][0] += left + split_bbox_list[sno][1] += top + split_bbox_list[sno][2] += left + split_bbox_list[sno][3] += top + + for sno in range(len(split_bbox_list)): + left, top, right, bottom = split_bbox_list[sno] + left, top, right, bottom = self.shrink_bbox([left, top, right, bottom]) + if self.mask_type == 1: + mask_img[top:bottom, left:right] = 1.0 + data['mask_img'] = mask_img + else: + mask_img[top:bottom, left:right, :] = (255, 255, 255) + data['image'] = mask_img + return data + + +class ResizeTableImage(object): + def __init__(self, max_len, **kwargs): + super(ResizeTableImage, self).__init__() + self.max_len = max_len + + def get_img_bbox(self, cells): + bbox_list = [] + if len(cells) == 0: + return bbox_list + cell_num = len(cells) + for cno in range(cell_num): + if "bbox" in cells[cno]: + bbox = cells[cno]['bbox'] + bbox_list.append(bbox) + return bbox_list + + def resize_img_table(self, img, bbox_list, max_len): + height, width = img.shape[0:2] + ratio = max_len / (max(height, width) * 1.0) + resize_h = int(height * ratio) + resize_w = int(width * ratio) + img_new = cv2.resize(img, (resize_w, resize_h)) + bbox_list_new = [] + for bno in range(len(bbox_list)): + left, top, right, bottom = bbox_list[bno].copy() + left = int(left * ratio) + top = int(top * ratio) + right = int(right * ratio) + bottom = int(bottom * ratio) + bbox_list_new.append([left, top, right, bottom]) + return img_new, bbox_list_new + + def __call__(self, data): + img = data['image'] + if 'cells' not in data: + cells = [] + else: + cells = data['cells'] + bbox_list = self.get_img_bbox(cells) + img_new, bbox_list_new = self.resize_img_table(img, bbox_list, self.max_len) + data['image'] = img_new + cell_num = len(cells) + bno = 0 + for cno in range(cell_num): + if "bbox" in data['cells'][cno]: + data['cells'][cno]['bbox'] = bbox_list_new[bno] + bno += 1 + data['max_len'] = self.max_len + return data + + +class PaddingTableImage(object): + def __init__(self, **kwargs): + super(PaddingTableImage, self).__init__() + + def __call__(self, data): + img = data['image'] + max_len = data['max_len'] + padding_img = np.zeros((max_len, max_len, 3), dtype=np.float32) + height, width = img.shape[0:2] + padding_img[0:height, 0:width, :] = img.copy() + data['image'] = padding_img + return data \ No newline at end of file diff --git a/modules/pytorchocr/data/imaug/operators.py b/modules/pytorchocr/data/imaug/operators.py new file mode 100644 index 0000000..daa67a2 --- /dev/null +++ b/modules/pytorchocr/data/imaug/operators.py @@ -0,0 +1,418 @@ +""" +# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import sys +import six +import cv2 +import numpy as np + + +class DecodeImage(object): + """ decode image """ + + def __init__(self, img_mode='RGB', channel_first=False, **kwargs): + self.img_mode = img_mode + self.channel_first = channel_first + + def __call__(self, data): + img = data['image'] + if six.PY2: + assert type(img) is str and len( + img) > 0, "invalid input 'img' in DecodeImage" + else: + assert type(img) is bytes and len( + img) > 0, "invalid input 'img' in DecodeImage" + img = np.frombuffer(img, dtype='uint8') + img = cv2.imdecode(img, 1) + if img is None: + return None + if self.img_mode == 'GRAY': + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + elif self.img_mode == 'RGB': + assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape) + img = img[:, :, ::-1] + + if self.channel_first: + img = img.transpose((2, 0, 1)) + + data['image'] = img + return data + + +class NRTRDecodeImage(object): + """ decode image """ + + def __init__(self, img_mode='RGB', channel_first=False, **kwargs): + self.img_mode = img_mode + self.channel_first = channel_first + + def __call__(self, data): + img = data['image'] + if six.PY2: + assert type(img) is str and len( + img) > 0, "invalid input 'img' in DecodeImage" + else: + assert type(img) is bytes and len( + img) > 0, "invalid input 'img' in DecodeImage" + img = np.frombuffer(img, dtype='uint8') + + img = cv2.imdecode(img, 1) + + if img is None: + return None + if self.img_mode == 'GRAY': + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + elif self.img_mode == 'RGB': + assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape) + img = img[:, :, ::-1] + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + if self.channel_first: + img = img.transpose((2, 0, 1)) + data['image'] = img + return data + + +class NormalizeImage(object): + """ normalize image such as substract mean, divide std + """ + + def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs): + if isinstance(scale, str): + scale = eval(scale) + self.scale = np.float32(scale if scale is not None else 1.0 / 255.0) + mean = mean if mean is not None else [0.485, 0.456, 0.406] + std = std if std is not None else [0.229, 0.224, 0.225] + + shape = (3, 1, 1) if order == 'chw' else (1, 1, 3) + self.mean = np.array(mean).reshape(shape).astype('float32') + self.std = np.array(std).reshape(shape).astype('float32') + + def __call__(self, data): + img = data['image'] + from PIL import Image + if isinstance(img, Image.Image): + img = np.array(img) + assert isinstance(img, + np.ndarray), "invalid input 'img' in NormalizeImage" + data['image'] = ( + img.astype('float32') * self.scale - self.mean) / self.std + return data + + +class ToCHWImage(object): + """ convert hwc image to chw image + """ + + def __init__(self, **kwargs): + pass + + def 
__call__(self, data): + img = data['image'] + from PIL import Image + if isinstance(img, Image.Image): + img = np.array(img) + data['image'] = img.transpose((2, 0, 1)) + return data + + +class Fasttext(object): + def __init__(self, path="None", **kwargs): + import fasttext + self.fast_model = fasttext.load_model(path) + + def __call__(self, data): + label = data['label'] + fast_label = self.fast_model[label] + data['fast_label'] = fast_label + return data + + +class KeepKeys(object): + def __init__(self, keep_keys, **kwargs): + self.keep_keys = keep_keys + + def __call__(self, data): + data_list = [] + for key in self.keep_keys: + data_list.append(data[key]) + return data_list + + +class Resize(object): + def __init__(self, size=(640, 640), **kwargs): + self.size = size + + def resize_image(self, img): + resize_h, resize_w = self.size + ori_h, ori_w = img.shape[:2] # (h, w, c) + ratio_h = float(resize_h) / ori_h + ratio_w = float(resize_w) / ori_w + img = cv2.resize(img, (int(resize_w), int(resize_h))) + return img, [ratio_h, ratio_w] + + def __call__(self, data): + img = data['image'] + text_polys = data['polys'] + + img_resize, [ratio_h, ratio_w] = self.resize_image(img) + new_boxes = [] + for box in text_polys: + new_box = [] + for cord in box: + new_box.append([cord[0] * ratio_w, cord[1] * ratio_h]) + new_boxes.append(new_box) + data['image'] = img_resize + data['polys'] = np.array(new_boxes, dtype=np.float32) + return data + + +class DetResizeForTest(object): + def __init__(self, **kwargs): + super(DetResizeForTest, self).__init__() + self.resize_type = 0 + if 'image_shape' in kwargs: + self.image_shape = kwargs['image_shape'] + self.resize_type = 1 + elif 'limit_side_len' in kwargs: + self.limit_side_len = kwargs['limit_side_len'] + self.limit_type = kwargs.get('limit_type', 'min') + elif 'resize_long' in kwargs: + self.resize_type = 2 + self.resize_long = kwargs.get('resize_long', 960) + else: + self.limit_side_len = 736 + self.limit_type = 'min' + + def __call__(self, data): + img = data['image'] + src_h, src_w, _ = img.shape + + if self.resize_type == 0: + # img, shape = self.resize_image_type0(img) + img, [ratio_h, ratio_w] = self.resize_image_type0(img) + elif self.resize_type == 2: + img, [ratio_h, ratio_w] = self.resize_image_type2(img) + else: + # img, shape = self.resize_image_type1(img) + img, [ratio_h, ratio_w] = self.resize_image_type1(img) + data['image'] = img + data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w]) + return data + + def resize_image_type1(self, img): + resize_h, resize_w = self.image_shape + ori_h, ori_w = img.shape[:2] # (h, w, c) + ratio_h = float(resize_h) / ori_h + ratio_w = float(resize_w) / ori_w + img = cv2.resize(img, (int(resize_w), int(resize_h))) + # return img, np.array([ori_h, ori_w]) + return img, [ratio_h, ratio_w] + + def resize_image_type0(self, img): + """ + resize image to a size multiple of 32 which is required by the network + args: + img(array): array with shape [h, w, c] + return(tuple): + img, (ratio_h, ratio_w) + """ + limit_side_len = self.limit_side_len + h, w, c = img.shape + + # limit the max side + if self.limit_type == 'max': + if max(h, w) > limit_side_len: + if h > w: + ratio = float(limit_side_len) / h + else: + ratio = float(limit_side_len) / w + else: + ratio = 1. + elif self.limit_type == 'min': + if min(h, w) < limit_side_len: + if h < w: + ratio = float(limit_side_len) / h + else: + ratio = float(limit_side_len) / w + else: + ratio = 1. 
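
Concretely, for limit_type 'max' the longer side is capped at limit_side_len and both sides are then snapped to multiples of 32, mirroring the code above:

h, w = 720, 1280                    # input frame
ratio = 960 / max(h, w)             # limit_type 'max', limit_side_len 960
resize_h = max(int(round(h * ratio / 32) * 32), 32)   # 544
resize_w = max(int(round(w * ratio / 32) * 32), 32)   # 960
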
+ elif self.limit_type == 'resize_long': + ratio = float(limit_side_len) / max(h, w) + else: + raise Exception('not support limit type, image ') + resize_h = int(h * ratio) + resize_w = int(w * ratio) + + resize_h = max(int(round(resize_h / 32) * 32), 32) + resize_w = max(int(round(resize_w / 32) * 32), 32) + + try: + if int(resize_w) <= 0 or int(resize_h) <= 0: + return None, (None, None) + img = cv2.resize(img, (int(resize_w), int(resize_h))) + except: + print(img.shape, resize_w, resize_h) + sys.exit(0) + ratio_h = resize_h / float(h) + ratio_w = resize_w / float(w) + return img, [ratio_h, ratio_w] + + def resize_image_type2(self, img): + h, w, _ = img.shape + + resize_w = w + resize_h = h + + if resize_h > resize_w: + ratio = float(self.resize_long) / resize_h + else: + ratio = float(self.resize_long) / resize_w + + resize_h = int(resize_h * ratio) + resize_w = int(resize_w * ratio) + + max_stride = 128 + resize_h = (resize_h + max_stride - 1) // max_stride * max_stride + resize_w = (resize_w + max_stride - 1) // max_stride * max_stride + img = cv2.resize(img, (int(resize_w), int(resize_h))) + ratio_h = resize_h / float(h) + ratio_w = resize_w / float(w) + + return img, [ratio_h, ratio_w] + + +class E2EResizeForTest(object): + def __init__(self, **kwargs): + super(E2EResizeForTest, self).__init__() + self.max_side_len = kwargs['max_side_len'] + self.valid_set = kwargs['valid_set'] + + def __call__(self, data): + img = data['image'] + src_h, src_w, _ = img.shape + if self.valid_set == 'totaltext': + im_resized, [ratio_h, ratio_w] = self.resize_image_for_totaltext( + img, max_side_len=self.max_side_len) + else: + im_resized, (ratio_h, ratio_w) = self.resize_image( + img, max_side_len=self.max_side_len) + data['image'] = im_resized + data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w]) + return data + + def resize_image_for_totaltext(self, im, max_side_len=512): + + h, w, _ = im.shape + resize_w = w + resize_h = h + ratio = 1.25 + if h * ratio > max_side_len: + ratio = float(max_side_len) / resize_h + resize_h = int(resize_h * ratio) + resize_w = int(resize_w * ratio) + + max_stride = 128 + resize_h = (resize_h + max_stride - 1) // max_stride * max_stride + resize_w = (resize_w + max_stride - 1) // max_stride * max_stride + im = cv2.resize(im, (int(resize_w), int(resize_h))) + ratio_h = resize_h / float(h) + ratio_w = resize_w / float(w) + return im, (ratio_h, ratio_w) + + def resize_image(self, im, max_side_len=512): + """ + resize image to a size multiple of max_stride which is required by the network + :param im: the resized image + :param max_side_len: limit of max image size to avoid out of memory in gpu + :return: the resized image and the resize ratio + """ + h, w, _ = im.shape + + resize_w = w + resize_h = h + + # Fix the longer side + if resize_h > resize_w: + ratio = float(max_side_len) / resize_h + else: + ratio = float(max_side_len) / resize_w + + resize_h = int(resize_h * ratio) + resize_w = int(resize_w * ratio) + + max_stride = 128 + resize_h = (resize_h + max_stride - 1) // max_stride * max_stride + resize_w = (resize_w + max_stride - 1) // max_stride * max_stride + im = cv2.resize(im, (int(resize_w), int(resize_h))) + ratio_h = resize_h / float(h) + ratio_w = resize_w / float(w) + + return im, (ratio_h, ratio_w) + + +class KieResize(object): + def __init__(self, **kwargs): + super(KieResize, self).__init__() + self.max_side, self.min_side = kwargs['img_scale'][0], kwargs[ + 'img_scale'][1] + + def __call__(self, data): + img = data['image'] + points = data['points'] 
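
The recurring `(n + max_stride - 1) // max_stride * max_stride` expression in these resize ops rounds n up to the next multiple of max_stride:

max_stride = 128
assert (700 + max_stride - 1) // max_stride * max_stride == 768
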
+ src_h, src_w, _ = img.shape + im_resized, scale_factor, [ratio_h, ratio_w + ], [new_h, new_w] = self.resize_image(img) + resize_points = self.resize_boxes(img, points, scale_factor) + data['ori_image'] = img + data['ori_boxes'] = points + data['points'] = resize_points + data['image'] = im_resized + data['shape'] = np.array([new_h, new_w]) + return data + + def resize_image(self, img): + norm_img = np.zeros([1024, 1024, 3], dtype='float32') + scale = [512, 1024] + h, w = img.shape[:2] + max_long_edge = max(scale) + max_short_edge = min(scale) + scale_factor = min(max_long_edge / max(h, w), + max_short_edge / min(h, w)) + resize_w, resize_h = int(w * float(scale_factor) + 0.5), int(h * float( + scale_factor) + 0.5) + max_stride = 32 + resize_h = (resize_h + max_stride - 1) // max_stride * max_stride + resize_w = (resize_w + max_stride - 1) // max_stride * max_stride + im = cv2.resize(img, (resize_w, resize_h)) + new_h, new_w = im.shape[:2] + w_scale = new_w / w + h_scale = new_h / h + scale_factor = np.array( + [w_scale, h_scale, w_scale, h_scale], dtype=np.float32) + norm_img[:new_h, :new_w, :] = im + return norm_img, scale_factor, [h_scale, w_scale], [new_h, new_w] + + def resize_boxes(self, im, points, scale_factor): + points = points * scale_factor + img_shape = im.shape[:2] + points[:, 0::2] = np.clip(points[:, 0::2], 0, img_shape[1]) + points[:, 1::2] = np.clip(points[:, 1::2], 0, img_shape[0]) + return points diff --git a/modules/pytorchocr/modeling/__init__.py b/modules/pytorchocr/modeling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/modules/pytorchocr/modeling/architectures/__init__.py b/modules/pytorchocr/modeling/architectures/__init__.py new file mode 100644 index 0000000..a93052f --- /dev/null +++ b/modules/pytorchocr/modeling/architectures/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +__all__ = ['build_model'] + + +def build_model(config, **kwargs): + from .base_model import BaseModel + + config = copy.deepcopy(config) + module_class = BaseModel(config, **kwargs) + return module_class \ No newline at end of file diff --git a/modules/pytorchocr/modeling/architectures/base_model.py b/modules/pytorchocr/modeling/architectures/base_model.py new file mode 100644 index 0000000..b0ba725 --- /dev/null +++ b/modules/pytorchocr/modeling/architectures/base_model.py @@ -0,0 +1,125 @@ +import os, sys +# import torch +import torch.nn as nn +# import torch.nn.functional as F +# from pytorchocr.modeling.common import Activation + +from pytorchocr.modeling.transforms import build_transform +from pytorchocr.modeling.backbones import build_backbone +from pytorchocr.modeling.necks import build_neck +from pytorchocr.modeling.heads import build_head + +class BaseModel(nn.Module): + def __init__(self, config, **kwargs): + """ + the module for OCR. + args: + config (dict): the super parameters for module. 
+ """ + super(BaseModel, self).__init__() + + in_channels = config.get('in_channels', 3) + model_type = config['model_type'] + # build transfrom, + # for rec, transfrom can be TPS,None + # for det and cls, transfrom shoule to be None, + # if you make model differently, you can use transfrom in det and cls + if 'Transform' not in config or config['Transform'] is None: + self.use_transform = False + else: + self.use_transform = True + config['Transform']['in_channels'] = in_channels + self.transform = build_transform(config['Transform']) + in_channels = self.transform.out_channels + # raise NotImplementedError + + # build backbone, backbone is need for del, rec and cls + if 'Backbone' not in config or config['Backbone'] is None: + self.use_backbone = False + else: + self.use_backbone = True + config["Backbone"]['in_channels'] = in_channels + self.backbone = build_backbone(config["Backbone"], model_type) + in_channels = self.backbone.out_channels + + # build neck + # for rec, neck can be cnn,rnn or reshape(None) + # for det, neck can be FPN, BIFPN and so on. + # for cls, neck should be none + if 'Neck' not in config or config['Neck'] is None: + self.use_neck = False + else: + self.use_neck = True + config['Neck']['in_channels'] = in_channels + self.neck = build_neck(config['Neck']) + in_channels = self.neck.out_channels + + # # build head, head is need for det, rec and cls + if 'Head' not in config or config['Head'] is None: + self.use_head = False + else: + self.use_head = True + config["Head"]['in_channels'] = in_channels + self.head = build_head(config["Head"], **kwargs) + + self.return_all_feats = config.get("return_all_feats", False) + + self._initialize_weights() + + def _initialize_weights(self): + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + + + def forward(self, x): + y = dict() + if self.use_transform: + x = self.transform(x) + if self.use_backbone: + x = self.backbone(x) + if isinstance(x, dict): + y.update(x) + else: + y["backbone_out"] = x + final_name = "backbone_out" + if self.use_neck: + x = self.neck(x) + if isinstance(x, dict): + y.update(x) + else: + y["neck_out"] = x + final_name = "neck_out" + if self.use_head: + x = self.head(x) + # for multi head, save ctc neck out for udml + if isinstance(x, dict) and 'ctc_nect' in x.keys(): + y['neck_out'] = x['ctc_neck'] + y['head_out'] = x + elif isinstance(x, dict): + y.update(x) + else: + y["head_out"] = x + if self.return_all_feats: + if self.training: + return y + elif isinstance(x, dict): + return x + else: + return {final_name: x} + else: + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/__init__.py b/modules/pytorchocr/modeling/backbones/__init__.py new file mode 100644 index 0000000..7abdb0e --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/__init__.py @@ -0,0 +1,56 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = ['build_backbone']
+
+
+def build_backbone(config, model_type):
+    if model_type == 'det':
+        from .det_mobilenet_v3 import MobileNetV3
+        from .det_resnet import ResNet
+        from .det_resnet_vd import ResNet_vd
+        from .det_resnet_vd_sast import ResNet_SAST
+        from .rec_lcnetv3 import PPLCNetV3
+        from .rec_hgnet import PPHGNet_small
+        support_dict = ['MobileNetV3', 'ResNet', 'ResNet_vd', 'ResNet_SAST', 'PPLCNetV3', 'PPHGNet_small']
+    elif model_type == 'rec' or model_type == 'cls':
+        from .rec_mobilenet_v3 import MobileNetV3
+        from .rec_resnet_vd import ResNet
+        from .rec_resnet_fpn import ResNetFPN
+        from .rec_mv1_enhance import MobileNetV1Enhance
+        from .rec_nrtr_mtb import MTB
+        from .rec_resnet_31 import ResNet31
+        from .rec_svtrnet import SVTRNet
+        from .rec_vitstr import ViTSTR
+        from .rec_densenet import DenseNet
+        from .rec_lcnetv3 import PPLCNetV3
+        from .rec_hgnet import PPHGNet_small
+        support_dict = ['MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB',
+                        'ResNet31', 'SVTRNet', 'ViTSTR', 'DenseNet', 'PPLCNetV3', 'PPHGNet_small']
+    elif model_type == 'e2e':
+        from .e2e_resnet_vd_pg import ResNet
+        support_dict = ['ResNet']
+    elif model_type == "table":
+        from .table_resnet_vd import ResNet
+        from .table_mobilenet_v3 import MobileNetV3
+        support_dict = ["ResNet", "MobileNetV3"]
+    else:
+        raise NotImplementedError
+
+    module_name = config.pop('name')
+    assert module_name in support_dict, Exception(
+        'when model type is {}, backbone only supports {}'.format(model_type,
+                                                                  support_dict))
+    module_class = eval(module_name)(**config)
+    return module_class
\ No newline at end of file
diff --git a/modules/pytorchocr/modeling/backbones/det_mobilenet_v3.py b/modules/pytorchocr/modeling/backbones/det_mobilenet_v3.py
new file mode 100644
index 0000000..795f5d1
--- /dev/null
+++ b/modules/pytorchocr/modeling/backbones/det_mobilenet_v3.py
@@ -0,0 +1,256 @@
+import os, sys
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from pytorchocr.modeling.common import Activation
+
+def make_divisible(v, divisor=8, min_value=None):
+    if min_value is None:
+        min_value = divisor
+    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+    if new_v < 0.9 * v:
+        new_v += divisor
+    return new_v
+
+
+class ConvBNLayer(nn.Module):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 padding,
+                 groups=1,
+                 if_act=True,
+                 act=None,
+                 name=None):
+        super(ConvBNLayer, self).__init__()
+        self.if_act = if_act
+        self.conv = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=False)
+
+        self.bn = nn.BatchNorm2d(
+            out_channels,
+        )
+        if self.if_act:
+            self.act = Activation(act_type=act, inplace=True)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        if self.if_act:
+            x = self.act(x)
+        return x
+
+
+class SEModule(nn.Module):
+    def __init__(self, in_channels, reduction=4, name=""):
+        super(SEModule, self).__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.conv1 = nn.Conv2d(
+            in_channels=in_channels,
+            
out_channels=in_channels // reduction, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.relu1 = Activation(act_type='relu', inplace=True) + self.conv2 = nn.Conv2d( + in_channels=in_channels // reduction, + out_channels=in_channels, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.hard_sigmoid = Activation(act_type='hard_sigmoid', inplace=True) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = self.relu1(outputs) + outputs = self.conv2(outputs) + outputs = self.hard_sigmoid(outputs) + outputs = inputs * outputs + return outputs + + +class ResidualUnit(nn.Module): + def __init__(self, + in_channels, + mid_channels, + out_channels, + kernel_size, + stride, + use_se, + act=None, + name=''): + super(ResidualUnit, self).__init__() + self.if_shortcut = stride == 1 and in_channels == out_channels + self.if_se = use_se + + self.expand_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + if_act=True, + act=act, + name=name + "_expand") + self.bottleneck_conv = ConvBNLayer( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=int((kernel_size - 1) // 2), + groups=mid_channels, + if_act=True, + act=act, + name=name + "_depthwise") + if self.if_se: + self.mid_se = SEModule(mid_channels, name=name + "_se") + self.linear_conv = ConvBNLayer( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + if_act=False, + act=None, + name=name + "_linear") + + def forward(self, inputs): + x = self.expand_conv(inputs) + x = self.bottleneck_conv(x) + if self.if_se: + x = self.mid_se(x) + x = self.linear_conv(x) + if self.if_shortcut: + x = inputs + x + return x + + +class MobileNetV3(nn.Module): + def __init__(self, + in_channels=3, + model_name='large', + scale=0.5, + disable_se=False, + **kwargs): + """ + the MobilenetV3 backbone network for detection module. 
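+        The forward pass returns the list of per-stage feature maps for the
+        detection neck (e.g. an FPN) to consume.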
+ Args: + params(dict): the super parameters for build network + """ + super(MobileNetV3, self).__init__() + + self.disable_se = disable_se + + if model_name == "large": + cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, False, 'relu', 1], + [3, 64, 24, False, 'relu', 2], + [3, 72, 24, False, 'relu', 1], + [5, 72, 40, True, 'relu', 2], + [5, 120, 40, True, 'relu', 1], + [5, 120, 40, True, 'relu', 1], + [3, 240, 80, False, 'hard_swish', 2], + [3, 200, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 480, 112, True, 'hard_swish', 1], + [3, 672, 112, True, 'hard_swish', 1], + [5, 672, 160, True, 'hard_swish', 2], + [5, 960, 160, True, 'hard_swish', 1], + [5, 960, 160, True, 'hard_swish', 1], + ] + cls_ch_squeeze = 960 + elif model_name == "small": + cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, True, 'relu', 2], + [3, 72, 24, False, 'relu', 2], + [3, 88, 24, False, 'relu', 1], + [5, 96, 40, True, 'hard_swish', 2], + [5, 240, 40, True, 'hard_swish', 1], + [5, 240, 40, True, 'hard_swish', 1], + [5, 120, 48, True, 'hard_swish', 1], + [5, 144, 48, True, 'hard_swish', 1], + [5, 288, 96, True, 'hard_swish', 2], + [5, 576, 96, True, 'hard_swish', 1], + [5, 576, 96, True, 'hard_swish', 1], + ] + cls_ch_squeeze = 576 + else: + raise NotImplementedError("mode[" + model_name + + "_model] is not implemented!") + + supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25] + assert scale in supported_scale, \ + "supported scale are {} but input scale is {}".format(supported_scale, scale) + inplanes = 16 + # conv1 + self.conv = ConvBNLayer( + in_channels=in_channels, + out_channels=make_divisible(inplanes * scale), + kernel_size=3, + stride=2, + padding=1, + groups=1, + if_act=True, + act='hard_swish', + name='conv1') + + self.stages = nn.ModuleList() + self.out_channels = [] + block_list = [] + i = 0 + inplanes = make_divisible(inplanes * scale) + for (k, exp, c, se, nl, s) in cfg: + se = se and not self.disable_se + if s == 2 and i > 2: + self.out_channels.append(inplanes) + self.stages.append(nn.Sequential(*block_list)) + block_list = [] + block_list.append( + ResidualUnit( + in_channels=inplanes, + mid_channels=make_divisible(scale * exp), + out_channels=make_divisible(scale * c), + kernel_size=k, + stride=s, + use_se=se, + act=nl, + name="conv" + str(i + 2))) + inplanes = make_divisible(scale * c) + i += 1 + block_list.append( + ConvBNLayer( + in_channels=inplanes, + out_channels=make_divisible(scale * cls_ch_squeeze), + kernel_size=1, + stride=1, + padding=0, + groups=1, + if_act=True, + act='hard_swish', + name='conv_last')) + self.stages.append(nn.Sequential(*block_list)) + self.out_channels.append(make_divisible(scale * cls_ch_squeeze)) + # for i, stage in enumerate(self.stages): + # self.add_sublayer(sublayer=stage, name="stage{}".format(i)) + + def forward(self, x): + x = self.conv(x) + out_list = [] + for stage in self.stages: + x = stage(x) + out_list.append(x) + return out_list diff --git a/modules/pytorchocr/modeling/backbones/det_resnet.py b/modules/pytorchocr/modeling/backbones/det_resnet.py new file mode 100644 index 0000000..f925d74 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/det_resnet.py @@ -0,0 +1,210 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from .det_resnet_vd import DeformableConvV2, ConvBNLayer + + +class BottleneckBlock(nn.Module): + def 
__init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + is_dcn=False): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + act="relu", ) + self.conv1 = ConvBNLayer( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + stride=stride, + act="relu", + is_dcn=is_dcn, + # dcn_groups=1, + ) + self.conv2 = ConvBNLayer( + in_channels=num_filters, + out_channels=num_filters * 4, + kernel_size=1, + act=None, ) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=num_channels, + out_channels=num_filters * 4, + kernel_size=1, + stride=stride, ) + + self.shortcut = shortcut + + self._num_channels_out = num_filters * 4 + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = torch.add(short, conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + num_channels, + num_filters, + stride, + shortcut=True, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=3, + stride=stride, + act="relu") + self.conv1 = ConvBNLayer( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + act=None) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + stride=stride) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv1) + y = F.relu(y) + return y + + +class ResNet(nn.Module): + def __init__(self, + in_channels=3, + layers=50, + out_indices=None, + dcn_stage=None): + super(ResNet, self).__init__() + + self.layers = layers + self.input_image_channel = in_channels + + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.dcn_stage = dcn_stage if dcn_stage is not None else [ + False, False, False, False + ] + self.out_indices = out_indices if out_indices is not None else [ + 0, 1, 2, 3 + ] + + self.conv = ConvBNLayer( + in_channels=self.input_image_channel, + out_channels=64, + kernel_size=7, + stride=2, + act="relu", ) + self.pool2d_max = nn.MaxPool2d( + kernel_size=3, + stride=2, + padding=1, ) + + self.stages = nn.ModuleList() + self.out_channels = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + block_list = nn.Sequential() + is_dcn = self.dcn_stage[block] + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = BottleneckBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + 
shortcut=shortcut, + is_dcn=is_dcn) + block_list.add_module(conv_name, bottleneck_block) + shortcut = True + if block in self.out_indices: + self.out_channels.append(num_filters[block] * 4) + self.stages.append(block_list) + else: + for block in range(len(depth)): + shortcut = False + block_list = nn.Sequential() + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = BasicBlock( + num_channels=num_channels[block] + if i == 0 else num_filters[block], + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut) + block_list.add_module(conv_name, basic_block) + shortcut = True + if block in self.out_indices: + self.out_channels.append(num_filters[block]) + self.stages.append(block_list) + + def forward(self, inputs): + y = self.conv(inputs) + y = self.pool2d_max(y) + out = [] + for i, block in enumerate(self.stages): + y = block(y) + if i in self.out_indices: + out.append(y) + return out diff --git a/modules/pytorchocr/modeling/backbones/det_resnet_vd.py b/modules/pytorchocr/modeling/backbones/det_resnet_vd.py new file mode 100644 index 0000000..2330bc7 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/det_resnet_vd.py @@ -0,0 +1,360 @@ + + +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +import torchvision + +class DeformableConvV2(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + weight_attr=None, + bias_attr=None, + lr_scale=1, + regularizer=None, + skip_quant=False, + dcn_bias_regularizer=None, + dcn_bias_lr_scale=2.): + super(DeformableConvV2, self).__init__() + self.offset_channel = 2 * kernel_size**2 * groups + self.mask_channel = kernel_size**2 * groups + + if bias_attr: + # in FCOS-DCN head, specifically need learning_rate and regularizer + dcn_bias_attr = True + else: + # in ResNet backbone, do not need bias + dcn_bias_attr = False + self.conv_dcn = torchvision.ops.DeformConv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2 * dilation, + dilation=dilation, + groups=groups//2 if groups > 1 else 1, + bias=dcn_bias_attr) + + self.conv_offset = nn.Conv2d( + in_channels, + groups * 3 * kernel_size**2, + kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + bias=True) + if skip_quant: + self.conv_offset.skip_quant = True + + def forward(self, x): + offset_mask = self.conv_offset(x) + offset, mask = torch.split( + offset_mask, + split_size_or_sections=[self.offset_channel, self.mask_channel], + dim=1) + mask = torch.sigmoid(mask) + y = self.conv_dcn(x, offset, mask=mask) + return y + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + dcn_groups=1, + is_vd_mode=False, + act=None, + name=None, + is_dcn=False, + ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self.act = act + self._pool2d_avg = nn.AvgPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + if not is_dcn: + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + else: + self._conv = DeformableConvV2( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=dcn_groups, + bias_attr=False) 
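+        # Both branches feed the BatchNorm below; is_dcn only swaps the plain
+        # Conv2d for a DCNv2 deformable convolution with learned offsets/masks.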
+ + self._batch_norm = nn.BatchNorm2d( + out_channels, + track_running_stats=True, + ) + + if act is not None: + self._act = Activation(act_type=act, inplace=True) + + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class BottleneckBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None, + is_dcn=False, + ): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b", + is_dcn=is_dcn, + dcn_groups=2, + ) + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = short + conv1 + y = F.relu(y) + return y + + +class ResNet_vd(nn.Module): + def __init__(self, + in_channels=3, + layers=50, + dcn_stage=None, + out_indices=None, + **kwargs): + super(ResNet_vd, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.dcn_stage = dcn_stage if dcn_stage is not None else [ + False, False, False, False + ] + self.out_indices = out_indices if out_indices is not None else [ + 0, 1, 2, 3 + ] + + self.conv1_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=32, + kernel_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( 
+ in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.stages = nn.ModuleList() + self.out_channels = [] + if layers >= 50: + for block in range(len(depth)): + # block_list = [] + block_list = nn.Sequential() + shortcut = False + is_dcn = self.dcn_stage[block] + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = BottleneckBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name, + is_dcn=is_dcn, + ) + + shortcut = True + block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block) + if block in self.out_indices: + self.out_channels.append(num_filters[block] * 4) + # self.stages.append(nn.Sequential(*block_list)) + self.stages.append(block_list) + else: + for block in range(len(depth)): + # block_list = [] + block_list = nn.Sequential() + shortcut = False + # is_dcn = self.dcn_stage[block] + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + + shortcut = True + block_list.add_module('bb_%d_%d' % (block, i), basic_block) + # block_list.append(basic_block) + if block in self.out_indices: + self.out_channels.append(num_filters[block]) + self.stages.append(block_list) + + # self.stages.append(nn.Sequential(*block_list)) + + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + out = [] + for i, block in enumerate(self.stages): + y = block(y) + if i in self.out_indices: + out.append(y) + return out \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/det_resnet_vd_sast.py b/modules/pytorchocr/modeling/backbones/det_resnet_vd_sast.py new file mode 100644 index 0000000..0f49643 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/det_resnet_vd_sast.py @@ -0,0 +1,279 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import ParamAttr +# import paddle.nn as nn +# import paddle.nn.functional as F + +__all__ = ["ResNet_SAST"] + + +class ConvBNLayer(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + if name == "conv1": + 
bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = nn.BatchNorm2d( + out_channels,) + self.act = act + if act is not None: + self._act = Activation(act_type=act) + + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act: + y = self._act(y) + return y + + +class BottleneckBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv1) + y = F.relu(y) + return y + + +class ResNet_SAST(nn.Module): + def __init__(self, in_channels=3, layers=50, **kwargs): + super(ResNet_SAST, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + # depth = [3, 4, 6, 3] + depth = [3, 4, 6, 3, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + # num_channels = [64, 256, 512, + # 1024] if layers >= 50 else [64, 64, 128, 256] + # num_filters = [64, 128, 256, 512] + num_channels = [64, 256, 512, + 1024, 2048] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512, 512] + + self.conv1_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=32, + kernel_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + act='relu', + 
name="conv1_2") + self.conv1_3 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=1, + act='relu', + name="conv1_3") + # self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) + self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.stages = nn.ModuleList() + self.out_channels = [3, 64] + if layers >= 50: + for block in range(len(depth)): + # block_list = [] + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = BottleneckBlock( + in_channels=num_channels[block] if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name + ) + shortcut = True + # block_list.append(bottleneck_block) + block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block) + self.out_channels.append(num_filters[block] * 4) + # self.stages.append(nn.Sequential(*block_list)) + self.stages.append(block_list) + else: + for block in range(len(depth)): + # block_list = [] + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + shortcut = True + # block_list.append(basic_block) + block_list.add_module('bb_%d_%d' % (block, i), basic_block) + self.out_channels.append(num_filters[block]) + # self.stages.append(nn.Sequential(*block_list)) + self.stages.append(block_list) + + def forward(self, inputs): + out = [inputs] + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + out.append(y) + y = self.pool2d_max(y) + for block in self.stages: + y = block(y) + out.append(y) + return out \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/e2e_resnet_vd_pg.py b/modules/pytorchocr/modeling/backbones/e2e_resnet_vd_pg.py new file mode 100644 index 0000000..d6ff30f --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/e2e_resnet_vd_pg.py @@ -0,0 +1,247 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +__all__ = ["ResNet"] + + +class ConvBNLayer(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = nn.BatchNorm2d(out_channels) + self.act = act + if self.act is not None: + self._act = Activation(act_type=self.act, inplace=True) + + def forward(self, inputs): + y = self._conv(inputs) + y = 
self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class BottleneckBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=stride, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv1) + y = F.relu(y) + return y + + +class ResNet(nn.Module): + def __init__(self, in_channels=3, layers=50, **kwargs): + super(ResNet, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + # depth = [3, 4, 6, 3] + depth = [3, 4, 6, 3, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, 1024, + 2048] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512, 512] + + self.conv1_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=64, + kernel_size=7, + stride=2, + act='relu', + name="conv1_1") + self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.stages = nn.ModuleList() + self.out_channels = [3, 64] + # num_filters = [64, 128, 256, 512, 512] + if layers >= 50: + for block in range(len(depth)): + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneckBlock = 
BottleneckBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + shortcut = True + block_list.add_module('bb_%d_%d' % (block, i), bottleneckBlock) + self.out_channels.append(num_filters[block] * 4) + self.stages.append(block_list) + else: + for block in range(len(depth)): + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basicBlock = BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + shortcut = True + block_list.add_module('bb_%d_%d' % (block, i), basicBlock) + self.out_channels.append(num_filters[block]) + self.stages.append(block_list) + + + def forward(self, inputs): + out = [inputs] + y = self.conv1_1(inputs) + out.append(y) + y = self.pool2d_max(y) + for block in self.stages: + y = block(y) + out.append(y) + return out diff --git a/modules/pytorchocr/modeling/backbones/rec_densenet.py b/modules/pytorchocr/modeling/backbones/rec_densenet.py new file mode 100644 index 0000000..8c1989f --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_densenet.py @@ -0,0 +1,133 @@ +""" +This code is refer from: +https://github.com/LBH1024/CAN/models/densenet.py + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Bottleneck(nn.Module): + def __init__(self, nChannels, growthRate, use_dropout): + super(Bottleneck, self).__init__() + interChannels = 4 * growthRate + self.bn1 = nn.BatchNorm2d(interChannels) + self.conv1 = nn.Conv2d( + nChannels, interChannels, kernel_size=1, + bias=True) # Xavier initialization + self.bn2 = nn.BatchNorm2d(growthRate) + self.conv2 = nn.Conv2d( + interChannels, growthRate, kernel_size=3, padding=1, + bias=True) # Xavier initialization + self.use_dropout = use_dropout + self.dropout = nn.Dropout(p=0.2) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + if self.use_dropout: + out = self.dropout(out) + out = F.relu(self.bn2(self.conv2(out))) + if self.use_dropout: + out = self.dropout(out) + out = torch.cat([x, out], 1) + return out + + +class SingleLayer(nn.Module): + def __init__(self, nChannels, growthRate, use_dropout): + super(SingleLayer, self).__init__() + self.bn1 = nn.BatchNorm2d(nChannels) + self.conv1 = nn.Conv2d( + nChannels, growthRate, kernel_size=3, padding=1, bias=False) + + self.use_dropout = use_dropout + self.dropout = nn.Dropout(p=0.2) + + def forward(self, x): + out = self.conv1(F.relu(x)) + if self.use_dropout: + out = self.dropout(out) + + out = torch.cat([x, out], 1) + return out + + +class Transition(nn.Module): + def __init__(self, nChannels, out_channels, use_dropout): + super(Transition, self).__init__() + self.bn1 = nn.BatchNorm2d(out_channels) + self.conv1 = nn.Conv2d( + nChannels, out_channels, kernel_size=1, bias=False) + self.use_dropout = use_dropout + self.dropout = nn.Dropout(p=0.2) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + if self.use_dropout: + out = self.dropout(out) + out = F.avg_pool2d(out, 2, ceil_mode=True, count_include_pad=False) + return out + + +class DenseNet(nn.Module): + def 
__init__(self, growthRate, reduction, bottleneck, use_dropout, + input_channel, **kwargs): + super(DenseNet, self).__init__() + + nDenseBlocks = 16 + nChannels = 2 * growthRate + + self.conv1 = nn.Conv2d( + input_channel, + nChannels, + kernel_size=7, + padding=3, + stride=2, + bias=False) + self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, + bottleneck, use_dropout) + nChannels += nDenseBlocks * growthRate + out_channels = int(math.floor(nChannels * reduction)) + self.trans1 = Transition(nChannels, out_channels, use_dropout) + + nChannels = out_channels + self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, + bottleneck, use_dropout) + nChannels += nDenseBlocks * growthRate + out_channels = int(math.floor(nChannels * reduction)) + self.trans2 = Transition(nChannels, out_channels, use_dropout) + + nChannels = out_channels + self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, + bottleneck, use_dropout) + self.out_channels = out_channels + + def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck, + use_dropout): + layers = [] + for i in range(int(nDenseBlocks)): + if bottleneck: + layers.append(Bottleneck(nChannels, growthRate, use_dropout)) + else: + layers.append(SingleLayer(nChannels, growthRate, use_dropout)) + nChannels += growthRate + return nn.Sequential(*layers) + + def forward(self, inputs): + x, x_m, y = inputs + out = self.conv1(x) + out = F.relu(out, inplace=True) + out = F.max_pool2d(out, 2, ceil_mode=True) + out = self.dense1(out) + out = self.trans1(out) + out = self.dense2(out) + out = self.trans2(out) + out = self.dense3(out) + return out, x_m, y diff --git a/modules/pytorchocr/modeling/backbones/rec_hgnet.py b/modules/pytorchocr/modeling/backbones/rec_hgnet.py new file mode 100644 index 0000000..fecb1bc --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_hgnet.py @@ -0,0 +1,324 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ConvBNAct(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + use_act=True): + super().__init__() + self.use_act = use_act + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + self.bn = nn.BatchNorm2d(out_channels) + if self.use_act: + self.act = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.use_act: + x = self.act(x) + return x + + +class ESEModule(nn.Module): + def __init__(self, channels): + super().__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv = nn.Conv2d( + in_channels=channels, + out_channels=channels, + kernel_size=1, + stride=1, + padding=0) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv(x) + x = self.sigmoid(x) + return x * identity + + +class HG_Block(nn.Module): + def __init__( + self, + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False, ): + super().__init__() + self.identity = identity + + self.layers = nn.ModuleList() + self.layers.append( + ConvBNAct( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + for _ in range(layer_num - 1): + self.layers.append( + ConvBNAct( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, + stride=1)) + + # feature aggregation + total_channels = in_channels + layer_num * mid_channels + self.aggregation_conv = ConvBNAct( + in_channels=total_channels, + 
out_channels=out_channels, + kernel_size=1, + stride=1) + self.att = ESEModule(out_channels) + + def forward(self, x): + identity = x + output = [] + output.append(x) + for layer in self.layers: + x = layer(x) + output.append(x) + x = torch.cat(output, dim=1) + x = self.aggregation_conv(x) + x = self.att(x) + if self.identity: + x += identity + return x + + +class HG_Stage(nn.Module): + def __init__(self, + in_channels, + mid_channels, + out_channels, + block_num, + layer_num, + downsample=True, + stride=[2, 1]): + super().__init__() + self.downsample = downsample + if downsample: + self.downsample = ConvBNAct( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=3, + stride=stride, + groups=in_channels, + use_act=False) + + blocks_list = [] + blocks_list.append( + HG_Block( + in_channels, + mid_channels, + out_channels, + layer_num, + identity=False)) + for _ in range(block_num - 1): + blocks_list.append( + HG_Block( + out_channels, + mid_channels, + out_channels, + layer_num, + identity=True)) + self.blocks = nn.Sequential(*blocks_list) + + def forward(self, x): + if self.downsample: + x = self.downsample(x) + x = self.blocks(x) + return x + + +class PPHGNet(nn.Module): + """ + PPHGNet + Args: + stem_channels: list. Stem channel list of PPHGNet. + stage_config: dict. The configuration of each stage of PPHGNet. such as the number of channels, stride, etc. + layer_num: int. Number of layers of HG_Block. + use_last_conv: boolean. Whether to use a 1x1 convolutional layer before the classification layer. + class_expand: int=2048. Number of channels for the last 1x1 convolutional layer. + dropout_prob: float. Parameters of dropout, 0.0 means dropout is not used. + class_num: int=1000. The number of classes. + Returns: + model: nn.Layer. Specific PPHGNet model depends on args. 
+ """ + + def __init__( + self, + stem_channels, + stage_config, + layer_num, + in_channels=3, + det=False, + out_indices=None): + super().__init__() + self.det = det + self.out_indices = out_indices if out_indices is not None else [ + 0, 1, 2, 3 + ] + + # stem + stem_channels.insert(0, in_channels) + self.stem = nn.Sequential(* [ + ConvBNAct( + in_channels=stem_channels[i], + out_channels=stem_channels[i + 1], + kernel_size=3, + stride=2 if i == 0 else 1) for i in range( + len(stem_channels) - 1) + ]) + + if self.det: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + # stages + self.stages = nn.ModuleList() + self.out_channels = [] + for block_id, k in enumerate(stage_config): + in_channels, mid_channels, out_channels, block_num, downsample, stride = stage_config[ + k] + self.stages.append( + HG_Stage(in_channels, mid_channels, out_channels, block_num, + layer_num, downsample, stride)) + if block_id in self.out_indices: + self.out_channels.append(out_channels) + + if not self.det: + self.out_channels = stage_config["stage4"][2] + + self._init_weights() + + def _init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.stem(x) + if self.det: + x = self.pool(x) + + out = [] + for i, stage in enumerate(self.stages): + x = stage(x) + if self.det and i in self.out_indices: + out.append(x) + if self.det: + return out + + if self.training: + x = F.adaptive_avg_pool2d(x, [1, 40]) + else: + x = F.avg_pool2d(x, [3, 2]) + return x + + +def PPHGNet_tiny(pretrained=False, use_ssld=False, **kwargs): + """ + PPHGNet_tiny + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_tiny` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [96, 96, 224, 1, False, [2, 1]], + "stage2": [224, 128, 448, 1, True, [1, 2]], + "stage3": [448, 160, 512, 2, True, [2, 1]], + "stage4": [512, 192, 768, 1, True, [2, 1]], + } + + model = PPHGNet( + stem_channels=[48, 48, 96], + stage_config=stage_config, + layer_num=5, + **kwargs) + return model + + +def PPHGNet_small(pretrained=False, use_ssld=False, det=False, **kwargs): + """ + PPHGNet_small + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_small` model depends on args. 
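+    Note: det=True selects stage_config_det (scalar downsample strides);
+        det=False selects stage_config_rec, whose asymmetric (h, w) stride
+        tuples are used for recognition inputs.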
+ """ + stage_config_det = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [128, 128, 256, 1, False, 2], + "stage2": [256, 160, 512, 1, True, 2], + "stage3": [512, 192, 768, 2, True, 2], + "stage4": [768, 224, 1024, 1, True, 2], + } + + stage_config_rec = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [128, 128, 256, 1, True, [2, 1]], + "stage2": [256, 160, 512, 1, True, [1, 2]], + "stage3": [512, 192, 768, 2, True, [2, 1]], + "stage4": [768, 224, 1024, 1, True, [2, 1]], + } + + model = PPHGNet( + stem_channels=[64, 64, 128], + stage_config=stage_config_det if det else stage_config_rec, + layer_num=6, + det=det, + **kwargs) + return model + + +def PPHGNet_base(pretrained=False, use_ssld=True, **kwargs): + """ + PPHGNet_base + Args: + pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise. + If str, means the path of the pretrained model. + use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True. + Returns: + model: nn.Layer. Specific `PPHGNet_base` model depends on args. + """ + stage_config = { + # in_channels, mid_channels, out_channels, blocks, downsample + "stage1": [160, 192, 320, 1, False, [2, 1]], + "stage2": [320, 224, 640, 2, True, [1, 2]], + "stage3": [640, 256, 960, 3, True, [2, 1]], + "stage4": [960, 288, 1280, 2, True, [2, 1]], + } + + model = PPHGNet( + stem_channels=[96, 96, 160], + stage_config=stage_config, + layer_num=7, + **kwargs) + return model diff --git a/modules/pytorchocr/modeling/backbones/rec_lcnetv3.py b/modules/pytorchocr/modeling/backbones/rec_lcnetv3.py new file mode 100644 index 0000000..a25bfeb --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_lcnetv3.py @@ -0,0 +1,474 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
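+
+# PP-LCNetV3 backbone ported from Paddle to PyTorch; the commented-out
+# paddle imports below mark the Paddle building blocks that this file
+# replaces with torch.nn equivalents.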
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +# from paddle.nn.initializer import Constant, KaimingNormal +# from paddle.nn import AdaptiveAvgPool2D, BatchNorm2D, Conv2D, Dropout, Hardsigmoid, Hardswish, Identity, Linear, ReLU +# from paddle.regularizer import L2Decay + +NET_CONFIG_det = { + "blocks2": + #k, in_c, out_c, s, use_se + [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]], + "blocks5": + [[3, 128, 256, 2, False], [5, 256, 256, 1, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True], + [5, 512, 512, 1, False], [5, 512, 512, 1, False]] +} + +NET_CONFIG_rec = { + "blocks2": + #k, in_c, out_c, s, use_se + [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 1, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, (2, 1), False], [3, 128, 128, 1, False]], + "blocks5": + [[3, 128, 256, (1, 2), False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, (2, 1), True], [5, 512, 512, 1, True], + [5, 512, 512, (2, 1), False], [5, 512, 512, 1, False]] +} + + +def make_divisible(v, divisor=16, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class LearnableAffineBlock(nn.Module): + def __init__(self, scale_value=1.0, bias_value=0.0, lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.scale = nn.Parameter(torch.Tensor([scale_value])) + self.bias = nn.Parameter(torch.Tensor([bias_value])) + + def forward(self, x): + return self.scale * x + self.bias + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + lr_mult=1.0): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels, + ) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class Act(nn.Module): + def __init__(self, act="hswish", lr_mult=1.0, lab_lr=0.1): + super().__init__() + if act == "hswish": + self.act = nn.Hardswish(inplace=True) + else: + assert act == "relu" + self.act = Activation(act) + self.lab = LearnableAffineBlock(lr_mult=lr_mult, lab_lr=lab_lr) + + def forward(self, x): + return self.lab(self.act(x)) + + +class LearnableRepLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + num_conv_branches=1, + lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.is_repped = False + self.groups = groups + self.stride = stride + self.kernel_size = kernel_size + self.in_channels = in_channels + self.out_channels = out_channels + self.num_conv_branches = num_conv_branches + self.padding = (kernel_size - 1) // 2 + + self.identity = nn.BatchNorm2d( + num_features=in_channels, + ) if out_channels == in_channels and stride == 1 else None + + self.conv_kxk = nn.ModuleList([ + ConvBNLayer( + in_channels, + out_channels, + kernel_size, + stride, + groups=groups, + lr_mult=lr_mult) for _ in 
range(self.num_conv_branches)
+        ])
+
+        self.conv_1x1 = ConvBNLayer(
+            in_channels,
+            out_channels,
+            1,
+            stride,
+            groups=groups,
+            lr_mult=lr_mult) if kernel_size > 1 else None
+
+        self.lab = LearnableAffineBlock(lr_mult=lr_mult, lab_lr=lab_lr)
+        self.act = Act(lr_mult=lr_mult, lab_lr=lab_lr)
+
+    def forward(self, x):
+        # for export
+        if self.is_repped:
+            out = self.lab(self.reparam_conv(x))
+            if self.stride != 2:
+                out = self.act(out)
+            return out
+
+        out = 0
+        if self.identity is not None:
+            out += self.identity(x)
+
+        if self.conv_1x1 is not None:
+            out += self.conv_1x1(x)
+
+        for conv in self.conv_kxk:
+            out += conv(x)
+
+        out = self.lab(out)
+        if self.stride != 2:
+            out = self.act(out)
+        return out
+
+    def rep(self):
+        if self.is_repped:
+            return
+        kernel, bias = self._get_kernel_bias()
+        self.reparam_conv = nn.Conv2d(
+            in_channels=self.in_channels,
+            out_channels=self.out_channels,
+            kernel_size=self.kernel_size,
+            stride=self.stride,
+            padding=self.padding,
+            groups=self.groups)
+        self.reparam_conv.weight.data = kernel
+        self.reparam_conv.bias.data = bias
+        self.is_repped = True
+
+    def _pad_kernel_1x1_to_kxk(self, kernel1x1, pad):
+        if not isinstance(kernel1x1, torch.Tensor):
+            return 0
+        else:
+            return nn.functional.pad(kernel1x1, [pad, pad, pad, pad])
+
+    def _get_kernel_bias(self):
+        kernel_conv_1x1, bias_conv_1x1 = self._fuse_bn_tensor(self.conv_1x1)
+        kernel_conv_1x1 = self._pad_kernel_1x1_to_kxk(kernel_conv_1x1,
+                                                      self.kernel_size // 2)
+
+        kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity)
+
+        kernel_conv_kxk = 0
+        bias_conv_kxk = 0
+        for conv in self.conv_kxk:
+            kernel, bias = self._fuse_bn_tensor(conv)
+            kernel_conv_kxk += kernel
+            bias_conv_kxk += bias
+
+        kernel_reparam = kernel_conv_kxk + kernel_conv_1x1 + kernel_identity
+        bias_reparam = bias_conv_kxk + bias_conv_1x1 + bias_identity
+        return kernel_reparam, bias_reparam
+
+    def _fuse_bn_tensor(self, branch):
+        if not branch:
+            return 0, 0
+        elif isinstance(branch, ConvBNLayer):
+            kernel = branch.conv.weight
+            running_mean = branch.bn.running_mean
+            running_var = branch.bn.running_var
+            gamma = branch.bn.weight
+            beta = branch.bn.bias
+            eps = branch.bn.eps
+        else:
+            assert isinstance(branch, nn.BatchNorm2d)
+            if not hasattr(self, 'id_tensor'):
+                input_dim = self.in_channels // self.groups
+                kernel_value = torch.zeros(
+                    (self.in_channels, input_dim, self.kernel_size,
+                     self.kernel_size),
+                    dtype=branch.weight.dtype)
+                for i in range(self.in_channels):
+                    kernel_value[i, i % input_dim, self.kernel_size // 2,
+                                 self.kernel_size // 2] = 1
+                self.id_tensor = kernel_value
+            kernel = self.id_tensor
+            running_mean = branch.running_mean
+            running_var = branch.running_var
+            gamma = branch.weight
+            beta = branch.bias
+            eps = branch.eps
+        # fold the BN statistics into an equivalent conv kernel and bias
+        std = (running_var + eps).sqrt()
+        t = (gamma / std).reshape((-1, 1, 1, 1))
+        return kernel * t, beta - running_mean * gamma / std
+
+
+class SELayer(nn.Module):
+    def __init__(self, channel, reduction=4, lr_mult=1.0):
+        super().__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.conv1 = nn.Conv2d(
+            in_channels=channel,
+            out_channels=channel // reduction,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        )
+        self.relu = nn.ReLU()
+        self.conv2 = nn.Conv2d(
+            in_channels=channel // reduction,
+            out_channels=channel,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        )
+        self.hardsigmoid = nn.Hardsigmoid(inplace=True)
+
+    def forward(self, x):
+        identity = x
+        x = self.avg_pool(x)
+        x = self.conv1(x)
+        x = self.relu(x)
+        x = self.conv2(x)
+        x = self.hardsigmoid(x)
+        x = identity * x
+        
return x + + +class LCNetV3Block(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + dw_size, + use_se=False, + conv_kxk_num=4, + lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.use_se = use_se + self.dw_conv = LearnableRepLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + groups=in_channels, + num_conv_branches=conv_kxk_num, + lr_mult=lr_mult, + lab_lr=lab_lr) + if use_se: + self.se = SELayer(in_channels, lr_mult=lr_mult) + self.pw_conv = LearnableRepLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + num_conv_branches=conv_kxk_num, + lr_mult=lr_mult, + lab_lr=lab_lr) + + def forward(self, x): + x = self.dw_conv(x) + if self.use_se: + x = self.se(x) + x = self.pw_conv(x) + return x + + +class PPLCNetV3(nn.Module): + def __init__(self, + scale=1.0, + conv_kxk_num=4, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + lab_lr=0.1, + det=False, + **kwargs): + super().__init__() + self.scale = scale + self.lr_mult_list = lr_mult_list + self.det = det + + self.net_config = NET_CONFIG_det if self.det else NET_CONFIG_rec + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 6, "lr_mult_list length should be 6 but got {}".format( + len(self.lr_mult_list)) + + self.conv1 = ConvBNLayer( + in_channels=3, + out_channels=make_divisible(16 * scale), + kernel_size=3, + stride=2, + lr_mult=self.lr_mult_list[0]) + + self.blocks2 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[1], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks2"]) + ]) + + self.blocks3 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[2], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks3"]) + ]) + + self.blocks4 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[3], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks4"]) + ]) + + self.blocks5 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[4], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks5"]) + ]) + + self.blocks6 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[5], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks6"]) + ]) + self.out_channels = make_divisible(512 * scale) + + if self.det: + mv_c = [16, 24, 56, 480] + self.out_channels = [ + make_divisible(self.net_config["blocks3"][-1][2] * scale), + make_divisible(self.net_config["blocks4"][-1][2] * scale), + 
make_divisible(self.net_config["blocks5"][-1][2] * scale), + make_divisible(self.net_config["blocks6"][-1][2] * scale), + ] + + self.layer_list = nn.ModuleList([ + nn.Conv2d(self.out_channels[0], int(mv_c[0] * scale), 1, 1, 0), + nn.Conv2d(self.out_channels[1], int(mv_c[1] * scale), 1, 1, 0), + nn.Conv2d(self.out_channels[2], int(mv_c[2] * scale), 1, 1, 0), + nn.Conv2d(self.out_channels[3], int(mv_c[3] * scale), 1, 1, 0) + ]) + self.out_channels = [ + int(mv_c[0] * scale), int(mv_c[1] * scale), + int(mv_c[2] * scale), int(mv_c[3] * scale) + ] + + def forward(self, x): + out_list = [] + x = self.conv1(x) + x = self.blocks2(x) + x = self.blocks3(x) + out_list.append(x) + x = self.blocks4(x) + out_list.append(x) + x = self.blocks5(x) + out_list.append(x) + x = self.blocks6(x) + out_list.append(x) + + if self.det: + out_list[0] = self.layer_list[0](out_list[0]) + out_list[1] = self.layer_list[1](out_list[1]) + out_list[2] = self.layer_list[2](out_list[2]) + out_list[3] = self.layer_list[3](out_list[3]) + return out_list + + if self.training: + x = F.adaptive_avg_pool2d(x, [1, 40]) + else: + x = F.avg_pool2d(x, [3, 2]) + return x diff --git a/modules/pytorchocr/modeling/backbones/rec_lcnetv3_bak.py b/modules/pytorchocr/modeling/backbones/rec_lcnetv3_bak.py new file mode 100644 index 0000000..2c5d18b --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_lcnetv3_bak.py @@ -0,0 +1,444 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +NET_CONFIG_det = { + "blocks2": + # k, in_c, out_c, s, use_se + [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 2, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, 2, False], [3, 128, 128, 1, False]], + "blocks5": + [[3, 128, 256, 2, False], [5, 256, 256, 1, False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, 2, True], [5, 512, 512, 1, True], + [5, 512, 512, 1, False], [5, 512, 512, 1, False]] +} + +NET_CONFIG_rec = { + "blocks2": + # k, in_c, out_c, s, use_se + [[3, 16, 32, 1, False]], + "blocks3": [[3, 32, 64, 1, False], [3, 64, 64, 1, False]], + "blocks4": [[3, 64, 128, (2, 1), False], [3, 128, 128, 1, False]], + "blocks5": + [[3, 128, 256, (1, 2), False], [5, 256, 256, 1, False], + [5, 256, 256, 1, False], [5, 256, 256, 1, False], [5, 256, 256, 1, False]], + "blocks6": [[5, 256, 512, (2, 1), True], [5, 512, 512, 1, True], + [5, 512, 512, (2, 1), False], [5, 512, 512, 1, False]] +} + + +def make_divisible(v, divisor=16, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class LearnableAffineBlock(nn.Module): + def __init__(self, scale_value=1.0, bias_value=0.0, lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.scale = nn.Parameter(torch.Tensor([scale_value])) + self.bias = nn.Parameter(torch.Tensor([bias_value])) + + def forward(self, x): + return self.scale * x + self.bias + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + lr_mult=1.0): + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class Act(nn.Module): + def 
__init__(self, act="hard_swish", lr_mult=1.0, lab_lr=0.1): + super().__init__() + assert act in ['hard_swish', 'relu'] + self.act = Activation(act) + self.lab = LearnableAffineBlock(lr_mult=lr_mult, lab_lr=lab_lr) + + def forward(self, x): + return self.lab(self.act(x)) + + +class LearnableRepLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + num_conv_branches=1, + lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.is_repped = False + self.groups = groups + self.stride = stride + self.kernel_size = kernel_size + self.in_channels = in_channels + self.out_channels = out_channels + self.num_conv_branches = num_conv_branches + self.padding = (kernel_size - 1) // 2 + self.identity = nn.BatchNorm2d(in_channels) if out_channels == in_channels and stride == 1 else None + + self.conv_kxk = nn.ModuleList([ + ConvBNLayer( + in_channels, + out_channels, + kernel_size, + stride, + groups=groups, + lr_mult=lr_mult) for _ in range(self.num_conv_branches) + ]) + + self.conv_1x1 = ConvBNLayer( + in_channels, + out_channels, + 1, + stride, + groups=groups, + lr_mult=lr_mult) if kernel_size > 1 else None + self.lab = LearnableAffineBlock(lr_mult=lr_mult, lab_lr=lab_lr) + self.act = Act(lr_mult=lr_mult, lab_lr=lab_lr) + + def forward(self, x): + # for export + if self.is_repped: + out = self.lab(self.reparam_conv(x)) + if self.stride != 2: + out = self.act(out) + return out + + out = 0 + if self.identity is not None: + out += self.identity(x) + + if self.conv_1x1 is not None: + out += self.conv_1x1(x) + + for conv in self.conv_kxk: + out += conv(x) + + out = self.lab(out) + if self.stride != 2: + out = self.act(out) + return out + + def rep(self): + if self.is_repped: + return + kernel, bias = self._get_kernel_bias() + self.reparam_conv = nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + groups=self.groups) + self.reparam_conv.weight.data = kernel + self.reparam_conv.bias.data = bias + self.is_repped = True + + def _pad_kernel_1x1_to_kxk(self, kernel1x1, pad): + if not isinstance(kernel1x1, torch.Tensor): + return 0 + else: + return nn.functional.pad(kernel1x1, [pad, pad, pad, pad]) + + def _get_kernel_bias(self): + kernel_conv_1x1, bias_conv_1x1 = self._fuse_bn_tensor(self.conv_1x1) + kernel_conv_1x1 = self._pad_kernel_1x1_to_kxk(kernel_conv_1x1, + self.kernel_size // 2) + + kernel_identity, bias_identity = self._fuse_bn_tensor(self.identity) + + kernel_conv_kxk = 0 + bias_conv_kxk = 0 + for conv in self.conv_kxk: + kernel, bias = self._fuse_bn_tensor(conv) + kernel_conv_kxk += kernel + bias_conv_kxk += bias + + kernel_reparam = kernel_conv_kxk + kernel_conv_1x1 + kernel_identity + bias_reparam = bias_conv_kxk + bias_conv_1x1 + bias_identity + return kernel_reparam, bias_reparam + + def _fuse_bn_tensor(self, branch): + if not branch: + return 0, 0 + elif isinstance(branch, ConvBNLayer): + kernel = branch.conv.weight + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = torch.zeros( + (self.in_channels, input_dim, self.kernel_size, + self.kernel_size), + dtype=branch.weight.dtype) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, self.kernel_size // 2, + self.kernel_size // 2] 
= 1 + self.id_tensor = kernel_value + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape((-1, 1, 1, 1)) + return kernel * t, beta - running_mean * gamma / std + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=4, lr_mult=1.0): + super().__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.relu = nn.ReLU() + self.conv2 = nn.Conv2d( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + self.hardsigmoid = Activation('hard_sigmoid') + + def forward(self, x): + identity = x + x = self.avg_pool(x) + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.hardsigmoid(x) + x = x * identity + return x + + +class LCNetV3Block(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + dw_size, + use_se=False, + conv_kxk_num=4, + lr_mult=1.0, + lab_lr=0.1): + super().__init__() + self.use_se = use_se + self.dw_conv = LearnableRepLayer( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=dw_size, + stride=stride, + groups=in_channels, + num_conv_branches=conv_kxk_num, + lr_mult=lr_mult, + lab_lr=lab_lr) + if use_se: + self.se = SELayer(in_channels, lr_mult=lr_mult) + self.pw_conv = LearnableRepLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + num_conv_branches=conv_kxk_num, + lr_mult=lr_mult, + lab_lr=lab_lr) + + def forward(self, x): + x = self.dw_conv(x) + if self.use_se: + x = self.se(x) + x = self.pw_conv(x) + return x + + +class PPLCNetV3(nn.Module): + def __init__(self, + scale=1.0, + conv_kxk_num=4, + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + lab_lr=0.1, + det=False, + **kwargs): + super().__init__() + self.scale = scale + self.lr_mult_list = lr_mult_list + self.det = det + + self.net_config = NET_CONFIG_det if self.det else NET_CONFIG_rec + + assert isinstance(self.lr_mult_list, ( + list, tuple + )), "lr_mult_list should be in (list, tuple) but got {}".format( + type(self.lr_mult_list)) + assert len(self.lr_mult_list + ) == 6, "lr_mult_list length should be 6 but got {}".format( + len(self.lr_mult_list)) + + self.conv1 = ConvBNLayer( + in_channels=3, + out_channels=make_divisible(16 * scale), + kernel_size=3, + stride=2, + lr_mult=self.lr_mult_list[0]) + + self.blocks2 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[1], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks2"]) + ]) + + self.blocks3 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[2], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in enumerate(self.net_config["blocks3"]) + ]) + + self.blocks4 = nn.Sequential(*[ + LCNetV3Block( + in_channels=make_divisible(in_c * scale), + out_channels=make_divisible(out_c * scale), + dw_size=k, + stride=s, + use_se=se, + conv_kxk_num=conv_kxk_num, + lr_mult=self.lr_mult_list[3], + lab_lr=lab_lr) + for i, (k, in_c, out_c, s, se + ) in 
enumerate(self.net_config["blocks4"])
+        ])
+
+        self.blocks5 = nn.Sequential(*[
+            LCNetV3Block(
+                in_channels=make_divisible(in_c * scale),
+                out_channels=make_divisible(out_c * scale),
+                dw_size=k,
+                stride=s,
+                use_se=se,
+                conv_kxk_num=conv_kxk_num,
+                lr_mult=self.lr_mult_list[4],
+                lab_lr=lab_lr)
+            for i, (k, in_c, out_c, s, se
+                    ) in enumerate(self.net_config["blocks5"])
+        ])
+
+        self.blocks6 = nn.Sequential(*[
+            LCNetV3Block(
+                in_channels=make_divisible(in_c * scale),
+                out_channels=make_divisible(out_c * scale),
+                dw_size=k,
+                stride=s,
+                use_se=se,
+                conv_kxk_num=conv_kxk_num,
+                lr_mult=self.lr_mult_list[5],
+                lab_lr=lab_lr)
+            for i, (k, in_c, out_c, s, se
+                    ) in enumerate(self.net_config["blocks6"])
+        ])
+        self.out_channels = make_divisible(512 * scale)
+
+        if self.det:
+            mv_c = [16, 24, 56, 480]
+            self.out_channels = [
+                make_divisible(self.net_config["blocks3"][-1][2] * scale),
+                make_divisible(self.net_config["blocks4"][-1][2] * scale),
+                make_divisible(self.net_config["blocks5"][-1][2] * scale),
+                make_divisible(self.net_config["blocks6"][-1][2] * scale),
+            ]
+
+            self.layer_list = nn.ModuleList([
+                nn.Conv2d(self.out_channels[0], int(mv_c[0] * scale), 1, 1, 0),
+                nn.Conv2d(self.out_channels[1], int(mv_c[1] * scale), 1, 1, 0),
+                nn.Conv2d(self.out_channels[2], int(mv_c[2] * scale), 1, 1, 0),
+                nn.Conv2d(self.out_channels[3], int(mv_c[3] * scale), 1, 1, 0)
+            ])
+            self.out_channels = [
+                int(mv_c[0] * scale), int(mv_c[1] * scale),
+                int(mv_c[2] * scale), int(mv_c[3] * scale)
+            ]
+
+    def forward(self, x):
+        out_list = []
+        x = self.conv1(x)
+
+        x = self.blocks2(x)
+        x = self.blocks3(x)
+        out_list.append(x)
+        x = self.blocks4(x)
+        out_list.append(x)
+        x = self.blocks5(x)
+        out_list.append(x)
+        x = self.blocks6(x)
+        out_list.append(x)
+
+        if self.det:
+            out_list[0] = self.layer_list[0](out_list[0])
+            out_list[1] = self.layer_list[1](out_list[1])
+            out_list[2] = self.layer_list[2](out_list[2])
+            out_list[3] = self.layer_list[3](out_list[3])
+            return out_list
+
+        if self.training:
+            x = F.adaptive_avg_pool2d(x, [1, 40])
+        else:
+            x = F.avg_pool2d(x, [3, 2])
+        return x
\ No newline at end of file
diff --git a/modules/pytorchocr/modeling/backbones/rec_mobilenet_v3.py b/modules/pytorchocr/modeling/backbones/rec_mobilenet_v3.py
new file mode 100644
index 0000000..52cc7a1
--- /dev/null
+++ b/modules/pytorchocr/modeling/backbones/rec_mobilenet_v3.py
@@ -0,0 +1,125 @@
+import os, sys
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from pytorchocr.modeling.common import Activation
+
+from .det_mobilenet_v3 import ResidualUnit, ConvBNLayer, make_divisible
+
+class MobileNetV3(nn.Module):
+    def __init__(self,
+                 in_channels=3,
+                 model_name='small',
+                 scale=0.5,
+                 large_stride=None,
+                 small_stride=None,
+                 **kwargs):
+        super(MobileNetV3, self).__init__()
+        if small_stride is None:
+            small_stride = [2, 2, 2, 2]
+        if large_stride is None:
+            large_stride = [1, 2, 2, 2]
+
+        assert isinstance(large_stride, list), "large_stride type must " \
+            "be list but got {}".format(type(large_stride))
+        assert isinstance(small_stride, list), "small_stride type must " \
+            "be list but got {}".format(type(small_stride))
+        assert len(large_stride) == 4, "large_stride length must be " \
+            "4 but got {}".format(len(large_stride))
+        assert len(small_stride) == 4, "small_stride length must be " \
+            "4 but got {}".format(len(small_stride))
+
+        if model_name == "large":
+            cfg = [
+                # k, exp, c, se, nl, s,
+                [3, 16, 16, 
False, 'relu', large_stride[0]], + [3, 64, 24, False, 'relu', (large_stride[1], 1)], + [3, 72, 24, False, 'relu', 1], + [5, 72, 40, True, 'relu', (large_stride[2], 1)], + [5, 120, 40, True, 'relu', 1], + [5, 120, 40, True, 'relu', 1], + [3, 240, 80, False, 'hard_swish', 1], + [3, 200, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 480, 112, True, 'hard_swish', 1], + [3, 672, 112, True, 'hard_swish', 1], + [5, 672, 160, True, 'hard_swish', (large_stride[3], 1)], + [5, 960, 160, True, 'hard_swish', 1], + [5, 960, 160, True, 'hard_swish', 1], + ] + cls_ch_squeeze = 960 + elif model_name == "small": + cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, True, 'relu', (small_stride[0], 1)], + [3, 72, 24, False, 'relu', (small_stride[1], 1)], + [3, 88, 24, False, 'relu', 1], + [5, 96, 40, True, 'hard_swish', (small_stride[2], 1)], + [5, 240, 40, True, 'hard_swish', 1], + [5, 240, 40, True, 'hard_swish', 1], + [5, 120, 48, True, 'hard_swish', 1], + [5, 144, 48, True, 'hard_swish', 1], + [5, 288, 96, True, 'hard_swish', (small_stride[3], 1)], + [5, 576, 96, True, 'hard_swish', 1], + [5, 576, 96, True, 'hard_swish', 1], + ] + cls_ch_squeeze = 576 + else: + raise NotImplementedError("mode[" + model_name + + "_model] is not implemented!") + + supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25] + assert scale in supported_scale, \ + "supported scales are {} but input scale is {}".format(supported_scale, scale) + + inplanes = 16 + # conv1 + self.conv1 = ConvBNLayer( + in_channels=in_channels, + out_channels=make_divisible(inplanes * scale), + kernel_size=3, + stride=2, + padding=1, + groups=1, + if_act=True, + act='hard_swish', + name='conv1') + i = 0 + block_list = [] + inplanes = make_divisible(inplanes * scale) + for (k, exp, c, se, nl, s) in cfg: + block_list.append( + ResidualUnit( + in_channels=inplanes, + mid_channels=make_divisible(scale * exp), + out_channels=make_divisible(scale * c), + kernel_size=k, + stride=s, + use_se=se, + act=nl, + name='conv' + str(i + 2))) + inplanes = make_divisible(scale * c) + i += 1 + self.blocks = nn.Sequential(*block_list) + + self.conv2 = ConvBNLayer( + in_channels=inplanes, + out_channels=make_divisible(scale * cls_ch_squeeze), + kernel_size=1, + stride=1, + padding=0, + groups=1, + if_act=True, + act='hard_swish', + name='conv_last') + + self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + self.out_channels = make_divisible(scale * cls_ch_squeeze) + + def forward(self, x): + x = self.conv1(x) + x = self.blocks(x) + x = self.conv2(x) + x = self.pool(x) + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/rec_mv1_enhance.py b/modules/pytorchocr/modeling/backbones/rec_mv1_enhance.py new file mode 100644 index 0000000..b4f9643 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_mv1_enhance.py @@ -0,0 +1,233 @@ +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + + +class ConvBNLayer(nn.Module): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + act='hard_swish'): + super(ConvBNLayer, self).__init__() + self.act = act + self._conv = nn.Conv2d( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + bias=False) + + self._batch_norm = nn.BatchNorm2d( + num_filters, + ) + if self.act is not None: + self._act = 
Activation(act_type=act, inplace=True) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class DepthwiseSeparable(nn.Module): + def __init__(self, + num_channels, + num_filters1, + num_filters2, + num_groups, + stride, + scale, + dw_size=3, + padding=1, + use_se=False): + super(DepthwiseSeparable, self).__init__() + self.use_se = use_se + self._depthwise_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=int(num_filters1 * scale), + filter_size=dw_size, + stride=stride, + padding=padding, + num_groups=int(num_groups * scale)) + if use_se: + self._se = SEModule(int(num_filters1 * scale)) + self._pointwise_conv = ConvBNLayer( + num_channels=int(num_filters1 * scale), + filter_size=1, + num_filters=int(num_filters2 * scale), + stride=1, + padding=0) + + def forward(self, inputs): + y = self._depthwise_conv(inputs) + if self.use_se: + y = self._se(y) + y = self._pointwise_conv(y) + return y + + +class MobileNetV1Enhance(nn.Module): + def __init__(self, + in_channels=3, + scale=0.5, + last_conv_stride=1, + last_pool_type='max', + **kwargs): + super().__init__() + self.scale = scale + self.block_list = [] + + self.conv1 = ConvBNLayer( + num_channels=in_channels, + filter_size=3, + channels=3, + num_filters=int(32 * scale), + stride=2, + padding=1) + + conv2_1 = DepthwiseSeparable( + num_channels=int(32 * scale), + num_filters1=32, + num_filters2=64, + num_groups=32, + stride=1, + scale=scale) + self.block_list.append(conv2_1) + + conv2_2 = DepthwiseSeparable( + num_channels=int(64 * scale), + num_filters1=64, + num_filters2=128, + num_groups=64, + stride=1, + scale=scale) + self.block_list.append(conv2_2) + + conv3_1 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=128, + num_groups=128, + stride=1, + scale=scale) + self.block_list.append(conv3_1) + + conv3_2 = DepthwiseSeparable( + num_channels=int(128 * scale), + num_filters1=128, + num_filters2=256, + num_groups=128, + stride=(2, 1), + scale=scale) + self.block_list.append(conv3_2) + + conv4_1 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=256, + num_groups=256, + stride=1, + scale=scale) + self.block_list.append(conv4_1) + + conv4_2 = DepthwiseSeparable( + num_channels=int(256 * scale), + num_filters1=256, + num_filters2=512, + num_groups=256, + stride=(2, 1), + scale=scale) + self.block_list.append(conv4_2) + + for _ in range(5): + conv5 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=512, + num_groups=512, + stride=1, + dw_size=5, + padding=2, + scale=scale, + use_se=False) + self.block_list.append(conv5) + + conv5_6 = DepthwiseSeparable( + num_channels=int(512 * scale), + num_filters1=512, + num_filters2=1024, + num_groups=512, + stride=(2, 1), + dw_size=5, + padding=2, + scale=scale, + use_se=True) + self.block_list.append(conv5_6) + + conv6 = DepthwiseSeparable( + num_channels=int(1024 * scale), + num_filters1=1024, + num_filters2=1024, + num_groups=1024, + stride=last_conv_stride, + dw_size=5, + padding=2, + use_se=True, + scale=scale) + self.block_list.append(conv6) + + self.block_list = nn.Sequential(*self.block_list) + if last_pool_type == 'avg': + self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) + else: + self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + self.out_channels = int(1024 * scale) + + def forward(self, inputs): + y = self.conv1(inputs) + y = self.block_list(y) + y = 
self.pool(y) + return y + +def hardsigmoid(x): + return F.relu6(x + 3., inplace=True) / 6. + +class SEModule(nn.Module): + def __init__(self, channel, reduction=4): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.conv2 = nn.Conv2d( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0, + bias=True) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = F.relu(outputs) + outputs = self.conv2(outputs) + outputs = hardsigmoid(outputs) + x = torch.mul(inputs, outputs) + + return x diff --git a/modules/pytorchocr/modeling/backbones/rec_nrtr_mtb.py b/modules/pytorchocr/modeling/backbones/rec_nrtr_mtb.py new file mode 100644 index 0000000..33e375c --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_nrtr_mtb.py @@ -0,0 +1,36 @@ + +import torch +from torch import nn + + +class MTB(nn.Module): + def __init__(self, cnn_num, in_channels): + super(MTB, self).__init__() + self.block = nn.Sequential() + self.out_channels = in_channels + self.cnn_num = cnn_num + if self.cnn_num == 2: + for i in range(self.cnn_num): + self.block.add_module( + 'conv_{}'.format(i), + nn.Conv2d( + in_channels=in_channels + if i == 0 else 32 * (2**(i - 1)), + out_channels=32 * (2**i), + kernel_size=3, + stride=2, + padding=1)) + self.block.add_module('relu_{}'.format(i), nn.ReLU()) + self.block.add_module('bn_{}'.format(i), + nn.BatchNorm2d(32 * (2**i))) + + + def forward(self, images): + x = self.block(images) + if self.cnn_num == 2: + # (b, w, h, c) + x = x.permute(0, 3, 2, 1) + x_shape = x.shape + x = torch.reshape( + x, (x_shape[0], x_shape[1], x_shape[2] * x_shape[3])) + return x diff --git a/modules/pytorchocr/modeling/backbones/rec_resnet_31.py b/modules/pytorchocr/modeling/backbones/rec_resnet_31.py new file mode 100644 index 0000000..ab7c30f --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_resnet_31.py @@ -0,0 +1,200 @@ +""" +This code is refer from: +https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/layers/conv_layer.py +https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/backbones/resnet31_ocr.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +# import paddle +# from paddle import ParamAttr +# import paddle.nn as nn +# import paddle.nn.functional as F + + +__all__ = ["ResNet31"] + + +def conv3x3(in_channel, out_channel, stride=1): + return nn.Conv2d( + in_channel, + out_channel, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_channels, channels, stride=1, downsample=False): + super().__init__() + self.conv1 = conv3x3(in_channels, channels, stride) + self.bn1 = nn.BatchNorm2d(channels) + self.relu = nn.ReLU() + self.conv2 = conv3x3(channels, channels) + self.bn2 = nn.BatchNorm2d(channels) + self.downsample = downsample + if downsample: + self.downsample = nn.Sequential( + nn.Conv2d( + in_channels, + channels * self.expansion, + 1, + stride, + bias=False), + nn.BatchNorm2d(channels * self.expansion), ) + else: + self.downsample = nn.Sequential() + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out 
= self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet31(nn.Module): + ''' + Args: + in_channels (int): Number of channels of input image tensor. + layers (list[int]): List of BasicBlock number for each stage. + channels (list[int]): List of out_channels of Conv2d layer. + out_indices (None | Sequence[int]): Indices of output stages. + last_stage_pool (bool): If True, add `MaxPool2d` layer to last stage. + ''' + + def __init__(self, + in_channels=3, + layers=[1, 2, 5, 3], + channels=[64, 128, 256, 256, 512, 512, 512], + out_indices=None, + last_stage_pool=False): + super(ResNet31, self).__init__() + assert isinstance(in_channels, int) + assert isinstance(last_stage_pool, bool) + + self.out_indices = out_indices + self.last_stage_pool = last_stage_pool + + # conv 1 (Conv Conv) + self.conv1_1 = nn.Conv2d( + in_channels, channels[0], kernel_size=3, stride=1, padding=1) + self.bn1_1 = nn.BatchNorm2d(channels[0]) + self.relu1_1 = nn.ReLU(inplace=True) + + self.conv1_2 = nn.Conv2d( + channels[0], channels[1], kernel_size=3, stride=1, padding=1) + self.bn1_2 = nn.BatchNorm2d(channels[1]) + self.relu1_2 = nn.ReLU(inplace=True) + + # conv 2 (Max-pooling, Residual block, Conv) + self.pool2 = nn.MaxPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.block2 = self._make_layer(channels[1], channels[2], layers[0]) + self.conv2 = nn.Conv2d( + channels[2], channels[2], kernel_size=3, stride=1, padding=1) + self.bn2 = nn.BatchNorm2d(channels[2]) + self.relu2 = nn.ReLU(inplace=True) + + # conv 3 (Max-pooling, Residual block, Conv) + self.pool3 = nn.MaxPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.block3 = self._make_layer(channels[2], channels[3], layers[1]) + self.conv3 = nn.Conv2d( + channels[3], channels[3], kernel_size=3, stride=1, padding=1) + self.bn3 = nn.BatchNorm2d(channels[3]) + self.relu3 = nn.ReLU(inplace=True) + + # conv 4 (Max-pooling, Residual block, Conv) + self.pool4 = nn.MaxPool2d( + kernel_size=(2, 1), stride=(2, 1), padding=0, ceil_mode=True) + self.block4 = self._make_layer(channels[3], channels[4], layers[2]) + self.conv4 = nn.Conv2d( + channels[4], channels[4], kernel_size=3, stride=1, padding=1) + self.bn4 = nn.BatchNorm2d(channels[4]) + self.relu4 = nn.ReLU(inplace=True) + + # conv 5 ((Max-pooling), Residual block, Conv) + self.pool5 = None + if self.last_stage_pool: + self.pool5 = nn.MaxPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self.block5 = self._make_layer(channels[4], channels[5], layers[3]) + self.conv5 = nn.Conv2d( + channels[5], channels[5], kernel_size=3, stride=1, padding=1) + self.bn5 = nn.BatchNorm2d(channels[5]) + self.relu5 = nn.ReLU(inplace=True) + + self.out_channels = channels[-1] + + def _make_layer(self, input_channels, output_channels, blocks): + layers = [] + for _ in range(blocks): + downsample = None + if input_channels != output_channels: + downsample = nn.Sequential( + nn.Conv2d( + input_channels, + output_channels, + kernel_size=1, + stride=1, + bias=False), + nn.BatchNorm2d(output_channels), ) + + layers.append( + BasicBlock( + input_channels, output_channels, downsample=downsample)) + input_channels = output_channels + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1_1(x) + x = self.bn1_1(x) + x = self.relu1_1(x) + + x = self.conv1_2(x) + x = self.bn1_2(x) + x = self.relu1_2(x) + + outs = [] + for i in range(4): + layer_index = i + 
2 + pool_layer = getattr(self, 'pool{}'.format(layer_index)) + block_layer = getattr(self, 'block{}'.format(layer_index)) + conv_layer = getattr(self, 'conv{}'.format(layer_index)) + bn_layer = getattr(self, 'bn{}'.format(layer_index)) + relu_layer = getattr(self, 'relu{}'.format(layer_index)) + + if pool_layer is not None: + x = pool_layer(x) + x = block_layer(x) + x = conv_layer(x) + x = bn_layer(x) + x = relu_layer(x) + + outs.append(x) + + if self.out_indices is not None: + return tuple([outs[i] for i in self.out_indices]) + + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/rec_resnet_fpn.py b/modules/pytorchocr/modeling/backbones/rec_resnet_fpn.py new file mode 100644 index 0000000..e9bad7b --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_resnet_fpn.py @@ -0,0 +1,278 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os, sys +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +__all__ = ["ResNetFPN"] + +class ResNetFPN(nn.Module): + def __init__(self, in_channels=1, layers=50, **kwargs): + super(ResNetFPN, self).__init__() + supported_layers = { + 18: { + 'depth': [2, 2, 2, 2], + 'block_class': BasicBlock + }, + 34: { + 'depth': [3, 4, 6, 3], + 'block_class': BasicBlock + }, + 50: { + 'depth': [3, 4, 6, 3], + 'block_class': BottleneckBlock + }, + 101: { + 'depth': [3, 4, 23, 3], + 'block_class': BottleneckBlock + }, + 152: { + 'depth': [3, 8, 36, 3], + 'block_class': BottleneckBlock + } + } + stride_list = [(2, 2), (2, 2), (1, 1), (1, 1)] + num_filters = [64, 128, 256, 512] + self.depth = supported_layers[layers]['depth'] + self.conv = ConvBNLayer( + in_channels=in_channels, + out_channels=64, + kernel_size=7, + stride=2, + act="relu", + name="conv1") + self.block_list = nn.ModuleList() + in_ch = 64 + if layers >= 50: + for block in range(len(self.depth)): + for i in range(self.depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + + bottlenectBlock = BottleneckBlock( + in_channels=in_ch, + out_channels=num_filters[block], + stride=stride_list[block] if i == 0 else 1, + name=conv_name) + + in_ch = num_filters[block] * 4 + self.block_list.add_module("bottleneckBlock_{}_{}".format(block, i), bottlenectBlock) + + else: + for block in range(len(self.depth)): + for i in range(self.depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + if i == 0 and block != 0: + stride = (2, 1) + else: + stride = (1, 1) + + basicBlock = BasicBlock( + in_channels=in_ch, + out_channels=num_filters[block], + stride=stride_list[block] if i == 0 else 1, + is_first=block == i == 0, + name=conv_name) + in_ch = basicBlock.out_channels + self.block_list.add_module(conv_name, basicBlock) + out_ch_list = [in_ch // 4, in_ch // 2, in_ch] + self.base_block = nn.ModuleList() + self.conv_trans = [] + self.bn_block = [] + for i in [-2, -3]: + in_channels = out_ch_list[i + 1] + out_ch_list[i] + bb_0 = nn.Conv2d( + in_channels=in_channels, + out_channels=out_ch_list[i], + kernel_size=1, + bias=True) + self.base_block.add_module("F_{}_base_block_0".format(i), bb_0) + bb_1 = nn.Conv2d( + in_channels=out_ch_list[i], + out_channels=out_ch_list[i], + kernel_size=3, + padding=1, + bias=True) + 
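# Each of the two fused levels (i = -2, -3) gets a fusion stack,
+            # registered in order: a 1x1 conv reducing the concatenated
+            # channels, this 3x3 smoothing conv, a BN + ReLU block, and a
+            # final 1x1 conv projecting to the 512 output channels; forward()
+            # applies them top-down after each concatenation.
+            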
self.base_block.add_module("F_{}_base_block_1".format(i), bb_1) + bb_2 = nn.Sequential( + nn.BatchNorm2d(out_ch_list[i]), + Activation("relu") + ) + self.base_block.add_module("F_{}_base_block_2".format(i), bb_2) + + bb_3 = nn.Conv2d( + in_channels=out_ch_list[i], + out_channels=512, + kernel_size=1, + bias=True) + self.base_block.add_module("F_{}_base_block_3".format(i), bb_3) + self.out_channels = 512 + + def __call__(self, x): + x = self.conv(x) + fpn_list = [] + F = [] + for i in range(len(self.depth)): + fpn_list.append(np.sum(self.depth[:i + 1])) + + for i, block in enumerate(self.block_list): + x = block(x) + for number in fpn_list: + if i + 1 == number: + F.append(x) + base = F[-1] + + j = 0 + for i, block in enumerate(self.base_block): + if i % 3 == 0 and i < 6: + j = j + 1 + b, c, w, h = F[-j - 1].shape + if [w, h] == list(base.shape[2:]): + base = base + else: + base = self.conv_trans[j - 1](base) + base = self.bn_block[j - 1](base) + base = torch.cat([base, F[-j - 1]], dim=1) + base = block(base) + return base + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=2 if stride == (1, 1) else kernel_size, + dilation=2 if stride == (1, 1) else 1, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False, ) + + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + + self.bn = nn.BatchNorm2d(out_channels) + self.act = act + if self.act is not None: + self._act = Activation(act_type=self.act, inplace=True) + + def __call__(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class ShortCut(nn.Module): + def __init__(self, in_channels, out_channels, stride, name, is_first=False): + super(ShortCut, self).__init__() + self.use_conv = True + + if in_channels != out_channels or stride != 1 or is_first == True: + if stride == (1, 1): + self.conv = ConvBNLayer( + in_channels, out_channels, 1, 1, name=name) + else: # stride==(2,2) + self.conv = ConvBNLayer( + in_channels, out_channels, 1, stride, name=name) + else: + self.use_conv = False + + def forward(self, x): + if self.use_conv: + x = self.conv(x) + return x + + +class BottleneckBlock(nn.Module): + def __init__(self, in_channels, out_channels, stride, name): + super(BottleneckBlock, self).__init__() + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + self.short = ShortCut( + in_channels=in_channels, + out_channels=out_channels * 4, + stride=stride, + is_first=False, + name=name + "_branch1") + self.out_channels = out_channels * 4 + + def forward(self, x): + y = self.conv0(x) + y = self.conv1(y) + y = self.conv2(y) + y = y + self.short(x) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, in_channels, out_channels, stride, name, is_first): + super(BasicBlock, self).__init__() + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + 
act='relu', + stride=stride, + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + self.short = ShortCut( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + is_first=is_first, + name=name + "_branch1") + self.out_channels = out_channels + + def forward(self, x): + y = self.conv0(x) + y = self.conv1(y) + y = y + self.short(x) + return F.relu(y) diff --git a/modules/pytorchocr/modeling/backbones/rec_resnet_vd.py b/modules/pytorchocr/modeling/backbones/rec_resnet_vd.py new file mode 100644 index 0000000..b8a2ed3 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_resnet_vd.py @@ -0,0 +1,260 @@ +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +class ConvBNLayer(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + self.act = act + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2d( + kernel_size=stride, stride=stride, padding=0, ceil_mode=True) + + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=1 if is_vd_mode else stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + + self._batch_norm = nn.BatchNorm2d( + out_channels,) + if self.act is not None: + self._act = Activation(act_type=act, inplace=True) + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class BottleneckBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=stride, + is_vd_mode=not if_first and stride[0] != 1, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = short + conv2 + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=stride, 
+ is_vd_mode=not if_first and stride[0] != 1, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = short + conv1 + y = F.relu(y) + return y + + +class ResNet(nn.Module): + def __init__(self, in_channels=3, layers=50, **kwargs): + super(ResNet, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=32, + kernel_size=3, + stride=1, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # self.block_list = list() + self.block_list = nn.Sequential() + if layers >= 50: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + if layers in [101, 152, 200] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + + if i == 0 and block != 0: + stride = (2, 1) + else: + stride = (1, 1) + + bottleneck_block = BottleneckBlock(in_channels=num_channels[block] if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=stride, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + shortcut = True + # self.block_list.append(bottleneck_block) + self.block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block) + self.out_channels = num_filters[block] + else: + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + if i == 0 and block != 0: + stride = (2, 1) + else: + stride = (1, 1) + + basic_block = BasicBlock(in_channels=num_channels[block] if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=stride, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name) + + shortcut = True + # self.block_list.append(basic_block) + self.block_list.add_module('bb_%d_%d' % (block, i), basic_block) + self.out_channels = num_filters[block] + self.out_pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + for block in self.block_list: + y = block(y) + y = self.out_pool(y) + + return y \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/rec_svtrnet.py b/modules/pytorchocr/modeling/backbones/rec_svtrnet.py new file mode 100644 index 0000000..1ba3d4f --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_svtrnet.py @@ -0,0 +1,582 @@ +import torch +import torch.nn as nn +from pytorchocr.modeling.common 
import Activation
+import numpy as np
+
+def drop_path(x, drop_prob=0., training=False):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ...
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = torch.as_tensor(1 - drop_prob)
+    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
+    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype)
+    random_tensor = torch.floor(random_tensor)  # binarize
+    output = x.divide(keep_prob) * random_tensor
+    return output
+
+
+class ConvBNLayer(nn.Module):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=3,
+                 stride=1,
+                 padding=0,
+                 bias_attr=False,
+                 groups=1,
+                 act='gelu'):
+        super().__init__()
+        self.conv = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=bias_attr)
+        self.norm = nn.BatchNorm2d(out_channels)
+        self.act = Activation(act_type=act, inplace=True)
+
+    def forward(self, inputs):
+        out = self.conv(inputs)
+        out = self.norm(out)
+        out = self.act(out)
+        return out
+
+
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    """
+
+    def __init__(self, drop_prob=None):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training)
+
+
+class Identity(nn.Module):
+    def __init__(self):
+        super(Identity, self).__init__()
+
+    def forward(self, input):
+        return input
+
+
+class Mlp(nn.Module):
+    def __init__(self,
+                 in_features,
+                 hidden_features=None,
+                 out_features=None,
+                 act_layer='gelu',
+                 drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = Activation(act_type=act_layer, inplace=True)
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class ConvMixer(nn.Module):
+    def __init__(
+            self,
+            dim,
+            num_heads=8,
+            HW=[8, 25],
+            local_k=[3, 3], ):
+        super().__init__()
+        self.HW = HW
+        self.dim = dim
+        self.local_mixer = nn.Conv2d(
+            dim,
+            dim,
+            local_k,
+            1, [local_k[0] // 2, local_k[1] // 2],
+            groups=num_heads,
+        )
+
+    def forward(self, x):
+        h = self.HW[0]
+        w = self.HW[1]
+        x = x.permute(0, 2, 1).reshape(-1, self.dim, h, w)
+        x = self.local_mixer(x)
+        x = x.flatten(2).permute(0, 2, 1)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(self,
+                 dim,
+                 num_heads=8,
+                 mixer='Global',
+                 HW=[8, 25],
+                 local_k=[7, 11],
+                 qkv_bias=False,
+                 qk_scale=None,
+                 attn_drop=0.,
+                 proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim**-0.5
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+        self.HW = HW
+        if HW is not None:
+            H = HW[0]
+            W = HW[1]
+            self.N = H * W
+            self.C = dim
+        if mixer == 'Local' and HW is not None:
+            hk = local_k[0]
+            wk = local_k[1]
+            mask = torch.ones(H * W, H + hk - 1, W + wk - 1, dtype=torch.float32)
+            for h in range(0, 
H): + for w in range(0, W): + mask[h * W + w, h:h + hk, w:w + wk] = 0. + mask_paddle = mask[:, hk // 2:H + hk // 2, wk // 2:W + wk // + 2].flatten(1) + mask_inf = torch.full([H * W, H * W], fill_value=float("-Inf"), dtype=torch.float32) + mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf) + self.mask = mask.unsqueeze(0).unsqueeze(1) + # self.mask = mask[None, None, :] + self.mixer = mixer + + def forward(self, x): + if self.HW is not None: + N = self.N + C = self.C + else: + _, N, C = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape((-1, N, 3, self.num_heads, C // self.num_heads)).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q.matmul(k.permute(0, 1, 3, 2))) + if self.mixer == 'Local': + attn += self.mask + attn = nn.functional.softmax(attn, dim=-1) + attn = self.attn_drop(attn) + + x = (attn.matmul(v)).permute(0, 2, 1, 3).reshape((-1, N, C)) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, + dim, + num_heads, + mixer='Global', + local_mixer=[7, 11], + HW=None, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer='gelu', + norm_layer='nn.LayerNorm', + epsilon=1e-6, + prenorm=True): + super().__init__() + if isinstance(norm_layer, str): + self.norm1 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm1 = norm_layer(dim) + if mixer == 'Global' or mixer == 'Local': + self.mixer = Attention( + dim, + num_heads=num_heads, + mixer=mixer, + HW=HW, + local_k=local_mixer, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + elif mixer == 'Conv': + self.mixer = ConvMixer( + dim, num_heads=num_heads, HW=HW, local_k=local_mixer) + else: + raise TypeError("The mixer must be one of [Global, Local, Conv]") + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else Identity() + if isinstance(norm_layer, str): + self.norm2 = eval(norm_layer)(dim, eps=epsilon) + else: + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp_ratio = mlp_ratio + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.prenorm = prenorm + + def forward(self, x): + if self.prenorm: + x = self.norm1(x + self.drop_path(self.mixer(x))) + x = self.norm2(x + self.drop_path(self.mlp(x))) + else: + x = x + self.drop_path(self.mixer(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=[32, 100], + in_channels=3, + embed_dim=768, + sub_num=2, + patch_size=[4, 4], + mode='pope', + ): + super().__init__() + num_patches = (img_size[1] // (2 ** sub_num)) * \ + (img_size[0] // (2 ** sub_num)) + self.img_size = img_size + self.num_patches = num_patches + self.embed_dim = embed_dim + self.norm = None + if mode == 'pope': + if sub_num == 2: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act='gelu', + bias_attr=True), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act='gelu', + bias_attr=True)) + if sub_num == 3: + self.proj = nn.Sequential( + ConvBNLayer( + in_channels=in_channels, + out_channels=embed_dim // 4, + kernel_size=3, + stride=2, + padding=1, + act='gelu', + bias_attr=True), + ConvBNLayer( + in_channels=embed_dim // 4, + out_channels=embed_dim // 2, + kernel_size=3, + stride=2, + padding=1, + act='gelu', + bias_attr=True), + ConvBNLayer( + in_channels=embed_dim // 2, + out_channels=embed_dim, + kernel_size=3, + stride=2, + padding=1, + act='gelu', + bias_attr=True)) + elif mode == 'linear': + self.proj = nn.Conv2d( + 1, embed_dim, kernel_size=patch_size, stride=patch_size) + self.num_patches = img_size[0] // patch_size[0] * img_size[ + 1] // patch_size[1] + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + "Input image size ({}*{}) doesn't match model ({}*{}).".format( + H,W,self.img_size[0],self.img_size[1] + ) + x = self.proj(x).flatten(2).permute(0, 2, 1) + return x + + +class SubSample(nn.Module): + def __init__(self, + in_channels, + out_channels, + types='Pool', + stride=[2, 1], + sub_norm='nn.LayerNorm', + act=None): + super().__init__() + self.types = types + if types == 'Pool': + self.avgpool = nn.AvgPool2d( + kernel_size=[3, 5], stride=stride, padding=[1, 2]) + self.maxpool = nn.MaxPool2d( + kernel_size=[3, 5], stride=stride, padding=[1, 2]) + self.proj = nn.Linear(in_channels, out_channels) + else: + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + ) + self.norm = eval(sub_norm)(out_channels) + if act is not None: + self.act = act() + else: + self.act = None + + def forward(self, x): + + if self.types == 'Pool': + x1 = self.avgpool(x) + x2 = self.maxpool(x) + x = (x1 + x2) * 0.5 + out = self.proj(x.flatten(2).permute(0, 2, 1)) + else: + x = self.conv(x) + out = x.flatten(2).permute(0, 2, 1) + out = self.norm(out) + if self.act is not None: + out = self.act(out) + + return out + + +class SVTRNet(nn.Module): + def __init__( + self, + img_size=[32, 100], + in_channels=3, + embed_dim=[64, 128, 256], + depth=[3, 6, 3], + num_heads=[2, 4, 8], + mixer=['Local'] * 6 + ['Global'] * + 6, # Local 
atten, Global atten, Conv + local_mixer=[[7, 11], [7, 11], [7, 11]], + patch_merging='Conv', # Conv, Pool, None + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + last_drop=0.0, + attn_drop_rate=0., + drop_path_rate=0.1, + norm_layer='nn.LayerNorm', + sub_norm='nn.LayerNorm', + epsilon=1e-6, + out_channels=192, + out_char_num=25, + block_unit='Block', + act='gelu', + last_stage=True, + sub_num=2, + prenorm=True, + use_lenhead=False, + **kwargs): + super().__init__() + self.img_size = img_size + self.embed_dim = embed_dim + self.out_channels = out_channels + self.prenorm = prenorm + patch_merging = None if patch_merging != 'Conv' and patch_merging != 'Pool' else patch_merging + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim[0], + sub_num=sub_num) + num_patches = self.patch_embed.num_patches + self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)] + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0])) + self.pos_drop = nn.Dropout(p=drop_rate) + Block_unit = eval(block_unit) + + dpr = np.linspace(0, drop_path_rate, sum(depth)) + self.blocks1 = nn.ModuleList([ + Block_unit( + dim=embed_dim[0], + num_heads=num_heads[0], + mixer=mixer[0:depth[0]][i], + HW=self.HW, + local_mixer=local_mixer[0], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=act, + attn_drop=attn_drop_rate, + drop_path=dpr[0:depth[0]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[0]) + ]) + if patch_merging is not None: + self.sub_sample1 = SubSample( + embed_dim[0], + embed_dim[1], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 2, self.HW[1]] + else: + HW = self.HW + self.patch_merging = patch_merging + self.blocks2 = nn.ModuleList([ + Block_unit( + dim=embed_dim[1], + num_heads=num_heads[1], + mixer=mixer[depth[0]:depth[0] + depth[1]][i], + HW=HW, + local_mixer=local_mixer[1], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=act, + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0]:depth[0] + depth[1]][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[1]) + ]) + if patch_merging is not None: + self.sub_sample2 = SubSample( + embed_dim[1], + embed_dim[2], + sub_norm=sub_norm, + stride=[2, 1], + types=patch_merging) + HW = [self.HW[0] // 4, self.HW[1]] + else: + HW = self.HW + self.blocks3 = nn.ModuleList([ + Block_unit( + dim=embed_dim[2], + num_heads=num_heads[2], + mixer=mixer[depth[0] + depth[1]:][i], + HW=HW, + local_mixer=local_mixer[2], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=act, + attn_drop=attn_drop_rate, + drop_path=dpr[depth[0] + depth[1]:][i], + norm_layer=norm_layer, + epsilon=epsilon, + prenorm=prenorm) for i in range(depth[2]) + ]) + self.last_stage = last_stage + if last_stage: + self.avg_pool = nn.AdaptiveAvgPool2d([1, out_char_num]) + self.last_conv = nn.Conv2d( + in_channels=embed_dim[2], + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.hardswish = Activation('hard_swish', inplace=True) #nn.Hardswish() + # self.dropout = nn.Dropout(p=last_drop, mode="downscale_in_infer") + self.dropout = nn.Dropout(p=last_drop) + if not prenorm: + self.norm = eval(norm_layer)(embed_dim[-1], eps=epsilon) + self.use_lenhead = use_lenhead + if use_lenhead: + self.len_conv = nn.Linear(embed_dim[2], self.out_channels) + 
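# Optional length head: a linear layer over the mean-pooled sequence
+            # features predicts the text length; forward() returns it alongside
+            # the visual features when use_lenhead is True.
+            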
self.hardswish_len = Activation('hard_swish', inplace=True)# nn.Hardswish() + self.dropout_len = nn.Dropout( + p=last_drop) + + torch.nn.init.xavier_normal_(self.pos_embed) + self.apply(self._init_weights) + + def _init_weights(self, m): + # weight initialization + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def forward_features(self, x): + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks1: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample1( + x.permute(0, 2, 1).reshape( + [-1, self.embed_dim[0], self.HW[0], self.HW[1]])) + for blk in self.blocks2: + x = blk(x) + if self.patch_merging is not None: + x = self.sub_sample2( + x.permute(0, 2, 1).reshape( + [-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]])) + for blk in self.blocks3: + x = blk(x) + if not self.prenorm: + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.use_lenhead: + len_x = self.len_conv(x.mean(1)) + len_x = self.dropout_len(self.hardswish_len(len_x)) + if self.last_stage: + if self.patch_merging is not None: + h = self.HW[0] // 4 + else: + h = self.HW[0] + x = self.avg_pool( + x.permute(0, 2, 1).reshape( + [-1, self.embed_dim[2], h, self.HW[1]])) + x = self.last_conv(x) + x = self.hardswish(x) + x = self.dropout(x) + if self.use_lenhead: + return x, len_x + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/rec_vitstr.py b/modules/pytorchocr/modeling/backbones/rec_vitstr.py new file mode 100644 index 0000000..1e22501 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/rec_vitstr.py @@ -0,0 +1,120 @@ +""" +This code is refer from: +https://github.com/roatienza/deep-text-recognition-benchmark/blob/master/modules/vitstr.py +""" + +import numpy as np +import torch +import torch.nn as nn +from pytorchocr.modeling.backbones.rec_svtrnet import Block, PatchEmbed + +# import paddle +# import paddle.nn as nn +# from ppocr.modeling.backbones.rec_svtrnet import Block, PatchEmbed, zeros_, trunc_normal_, ones_ + +scale_dim_heads = {'tiny': [192, 3], 'small': [384, 6], 'base': [768, 12]} + + +class ViTSTR(nn.Module): + def __init__(self, + img_size=[224, 224], + in_channels=1, + scale='tiny', + seqlen=27, + patch_size=[16, 16], + embed_dim=None, + depth=12, + num_heads=None, + mlp_ratio=4, + qkv_bias=True, + qk_scale=None, + drop_path_rate=0., + drop_rate=0., + attn_drop_rate=0., + norm_layer='nn.LayerNorm', + act_layer='gelu', + epsilon=1e-6, + out_channels=None, + **kwargs): + super().__init__() + self.seqlen = seqlen + embed_dim = embed_dim if embed_dim is not None else scale_dim_heads[ + scale][0] + num_heads = num_heads if num_heads is not None else scale_dim_heads[ + scale][1] + out_channels = out_channels if out_channels is not None else embed_dim + self.patch_embed = PatchEmbed( + img_size=img_size, + in_channels=in_channels, + embed_dim=embed_dim, + patch_size=patch_size, + mode='linear') + num_patches = self.patch_embed.num_patches + + self.pos_embed 
= nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = np.linspace(0, drop_path_rate, depth) + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + epsilon=epsilon, + prenorm=False) for i in range(depth) + ]) + self.norm = eval(norm_layer)(embed_dim, eps=epsilon) + + self.out_channels = out_channels + + torch.nn.init.xavier_normal_(self.pos_embed) + torch.nn.init.xavier_normal_(self.cls_token) + self.apply(self._init_weights) + + def _init_weights(self, m): + # weight initialization + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + # cls_tokens = paddle.tile(self.cls_token, repeat_times=[B, 1, 1]) + cls_tokens = self.cls_token.repeat(B, 1, 1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.pos_drop(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = x[:, :self.seqlen] + return x.permute(0, 2, 1).unsqueeze(2) diff --git a/modules/pytorchocr/modeling/backbones/table_mobilenet_v3.py b/modules/pytorchocr/modeling/backbones/table_mobilenet_v3.py new file mode 100644 index 0000000..fa61e05 --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/table_mobilenet_v3.py @@ -0,0 +1,270 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['MobileNetV3'] + + +def make_divisible(v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + +def hard_sigmoid(x, slope=0.1666667, offset=0.5,): + return torch.clamp(slope * x + offset, 0., 1.) + +def hard_swish(x, inplace=True): + return x * F.relu6(x + 3., inplace=inplace) / 6. + +class MobileNetV3(nn.Module): + def __init__(self, + in_channels=3, + model_name='large', + scale=0.5, + disable_se=False, + **kwargs): + """ + the MobilenetV3 backbone network for detection module. 
+ Args: + params(dict): the super parameters for build network + """ + super(MobileNetV3, self).__init__() + + self.disable_se = disable_se + + if model_name == "large": + cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, False, 'relu', 1], + [3, 64, 24, False, 'relu', 2], + [3, 72, 24, False, 'relu', 1], + [5, 72, 40, True, 'relu', 2], + [5, 120, 40, True, 'relu', 1], + [5, 120, 40, True, 'relu', 1], + [3, 240, 80, False, 'hardswish', 2], + [3, 200, 80, False, 'hardswish', 1], + [3, 184, 80, False, 'hardswish', 1], + [3, 184, 80, False, 'hardswish', 1], + [3, 480, 112, True, 'hardswish', 1], + [3, 672, 112, True, 'hardswish', 1], + [5, 672, 160, True, 'hardswish', 2], + [5, 960, 160, True, 'hardswish', 1], + [5, 960, 160, True, 'hardswish', 1], + ] + cls_ch_squeeze = 960 + elif model_name == "small": + cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, True, 'relu', 2], + [3, 72, 24, False, 'relu', 2], + [3, 88, 24, False, 'relu', 1], + [5, 96, 40, True, 'hardswish', 2], + [5, 240, 40, True, 'hardswish', 1], + [5, 240, 40, True, 'hardswish', 1], + [5, 120, 48, True, 'hardswish', 1], + [5, 144, 48, True, 'hardswish', 1], + [5, 288, 96, True, 'hardswish', 2], + [5, 576, 96, True, 'hardswish', 1], + [5, 576, 96, True, 'hardswish', 1], + ] + cls_ch_squeeze = 576 + else: + raise NotImplementedError("mode[" + model_name + + "_model] is not implemented!") + + supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25] + assert scale in supported_scale, \ + "supported scale are {} but input scale is {}".format(supported_scale, scale) + inplanes = 16 + # conv1 + self.conv = ConvBNLayer( + in_channels=in_channels, + out_channels=make_divisible(inplanes * scale), + kernel_size=3, + stride=2, + padding=1, + groups=1, + if_act=True, + act='hardswish', + name='conv1') + + self.stages = nn.ModuleList() + self.out_channels = [] + block_list = [] + i = 0 + inplanes = make_divisible(inplanes * scale) + for (k, exp, c, se, nl, s) in cfg: + se = se and not self.disable_se + start_idx = 2 if model_name == 'large' else 0 + if s == 2 and i > start_idx: + self.out_channels.append(inplanes) + self.stages.append(nn.Sequential(*block_list)) + block_list = [] + block_list.append( + ResidualUnit( + in_channels=inplanes, + mid_channels=make_divisible(scale * exp), + out_channels=make_divisible(scale * c), + kernel_size=k, + stride=s, + use_se=se, + act=nl, + name="conv" + str(i + 2))) + inplanes = make_divisible(scale * c) + i += 1 + block_list.append( + ConvBNLayer( + in_channels=inplanes, + out_channels=make_divisible(scale * cls_ch_squeeze), + kernel_size=1, + stride=1, + padding=0, + groups=1, + if_act=True, + act='hardswish', + name='conv_last')) + self.stages.append(nn.Sequential(*block_list)) + self.out_channels.append(make_divisible(scale * cls_ch_squeeze)) + # for i, stage in enumerate(self.stages): + # self.add_module(module=stage, name="stage{}".format(i)) + + def forward(self, x): + x = self.conv(x) + out_list = [] + for stage in self.stages: + x = stage(x) + out_list.append(x) + return out_list + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels, + ) + + def forward(self, x): + x = self.conv(x) + x = 
self.bn(x) + if self.if_act: + if self.act == "relu": + x = F.relu(x) + elif self.act == "hardswish": + x = hard_swish(x) + else: + print("The activation function({}) is selected incorrectly.". + format(self.act)) + exit() + return x + + +class ResidualUnit(nn.Module): + def __init__(self, + in_channels, + mid_channels, + out_channels, + kernel_size, + stride, + use_se, + act=None, + name=''): + super(ResidualUnit, self).__init__() + self.if_shortcut = stride == 1 and in_channels == out_channels + self.if_se = use_se + + self.expand_conv = ConvBNLayer( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + if_act=True, + act=act, + name=name + "_expand") + self.bottleneck_conv = ConvBNLayer( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=int((kernel_size - 1) // 2), + groups=mid_channels, + if_act=True, + act=act, + name=name + "_depthwise") + if self.if_se: + self.mid_se = SEModule(mid_channels, name=name + "_se") + self.linear_conv = ConvBNLayer( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + if_act=False, + act=None, + name=name + "_linear") + + def forward(self, inputs): + x = self.expand_conv(inputs) + x = self.bottleneck_conv(x) + if self.if_se: + x = self.mid_se(x) + x = self.linear_conv(x) + if self.if_shortcut: + x = torch.add(inputs, x) + return x + + +class SEModule(nn.Module): + def __init__(self, in_channels, reduction=4, name=""): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv1 = nn.Conv2d( + in_channels=in_channels, + out_channels=in_channels // reduction, + kernel_size=1, + stride=1, + padding=0, + bias=True) + self.conv2 = nn.Conv2d( + in_channels=in_channels // reduction, + out_channels=in_channels, + kernel_size=1, + stride=1, + padding=0, + bias=True) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = F.relu(outputs) + outputs = self.conv2(outputs) + outputs = hard_sigmoid(outputs, slope=0.2, offset=0.5) + return inputs * outputs \ No newline at end of file diff --git a/modules/pytorchocr/modeling/backbones/table_resnet_vd.py b/modules/pytorchocr/modeling/backbones/table_resnet_vd.py new file mode 100644 index 0000000..77bfe4a --- /dev/null +++ b/modules/pytorchocr/modeling/backbones/table_resnet_vd.py @@ -0,0 +1,269 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + +__all__ = ["ResNet"] + + +class ConvBNLayer(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None, ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = nn.BatchNorm2d( + out_channels, + ) + self.act = act + if self.act is not None: + self._act = Activation(act, inplace=True) + + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = 
self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class BottleneckBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + name=None): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + name=name + "_branch2a") + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None, + name=name + "_branch2b") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = torch.add(short, conv1) + y = F.relu(y) + return y + + +class ResNet(nn.Module): + def __init__(self, in_channels=3, layers=50, **kwargs): + super(ResNet, self).__init__() + + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, + 1024] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + self.conv1_1 = ConvBNLayer( + in_channels=in_channels, + out_channels=32, + kernel_size=3, + stride=2, + act='relu', + name="conv1_1") + self.conv1_2 = ConvBNLayer( + in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + act='relu', + name="conv1_2") + self.conv1_3 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=1, + act='relu', + name="conv1_3") + self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.stages = nn.ModuleList() + self.out_channels = [] + if layers >= 50: + for block in range(len(depth)): + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i 
== 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + bottleneck_block = BottleneckBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name + ) + shortcut = True + # block_list.append(bottleneck_block) + block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block) + self.out_channels.append(num_filters[block] * 4) + # self.stages.append(nn.Sequential(*block_list)) + self.stages.append(block_list) + else: + for block in range(len(depth)): + block_list = nn.Sequential() + shortcut = False + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0, + name=conv_name + ) + shortcut = True + # block_list.append(basic_block) + block_list.add_module('bb_%d_%d' % (block, i), basic_block) + self.out_channels.append(num_filters[block]) + # self.stages.append(nn.Sequential(*block_list)) + self.stages.append(block_list) + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + y = self.pool2d_max(y) + out = [] + for block in self.stages: + y = block(y) + out.append(y) + return out \ No newline at end of file diff --git a/modules/pytorchocr/modeling/common.py b/modules/pytorchocr/modeling/common.py new file mode 100644 index 0000000..2d6e3d9 --- /dev/null +++ b/modules/pytorchocr/modeling/common.py @@ -0,0 +1,73 @@ + + +import torch +import torch.nn as nn +import torch.nn.functional as F + +class Hswish(nn.Module): + def __init__(self, inplace=True): + super(Hswish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x * F.relu6(x + 3., inplace=self.inplace) / 6. + +# out = max(0, min(1, slop*x+offset)) +# paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None) +class Hsigmoid(nn.Module): + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + # torch: F.relu6(x + 3., inplace=self.inplace) / 6. + # paddle: F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. + return F.relu6(1.2 * x + 3., inplace=self.inplace) / 6. 
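As the comments above note, Hsigmoid deliberately reproduces Paddle's hard_sigmoid (slope 0.2, offset 0.5) rather than torch.nn.Hardsigmoid (slope 1/6, same offset), so converted PaddleOCR models keep their original activation. A quick sanity check of the identity relu6(1.2*x + 3)/6 == clamp(0.2*x + 0.5, 0, 1), assuming only torch is installed:

    import torch
    import torch.nn.functional as F

    x = torch.linspace(-4., 4., 9)
    paddle_style = F.relu6(1.2 * x + 3.) / 6.   # what Hsigmoid computes
    assert torch.allclose(paddle_style, torch.clamp(0.2 * x + 0.5, 0., 1.))
    # torch's built-in hard sigmoid uses relu6(x + 3)/6, so it differs
    # away from x = 0:
    print((paddle_style - F.hardsigmoid(x)).abs().max())  # non-zero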
+
+class GELU(nn.Module):
+    def __init__(self, inplace=True):
+        super(GELU, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return torch.nn.functional.gelu(x)
+
+
+class Swish(nn.Module):
+    def __init__(self, inplace=True):
+        super(Swish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        if self.inplace:
+            x.mul_(torch.sigmoid(x))
+            return x
+        else:
+            return x * torch.sigmoid(x)
+
+
+class Activation(nn.Module):
+    def __init__(self, act_type, inplace=True):
+        super(Activation, self).__init__()
+        act_type = act_type.lower()
+        if act_type == 'relu':
+            self.act = nn.ReLU(inplace=inplace)
+        elif act_type == 'relu6':
+            self.act = nn.ReLU6(inplace=inplace)
+        elif act_type == 'sigmoid':
+            raise NotImplementedError
+        elif act_type == 'hard_sigmoid':
+            self.act = Hsigmoid(inplace)  # Paddle-style slope, not nn.Hardsigmoid
+        elif act_type == 'hard_swish' or act_type == 'hswish':
+            self.act = Hswish(inplace=inplace)
+        elif act_type == 'leakyrelu':
+            self.act = nn.LeakyReLU(inplace=inplace)
+        elif act_type == 'gelu':
+            self.act = GELU(inplace=inplace)
+        elif act_type == 'swish':
+            self.act = Swish(inplace=inplace)
+        else:
+            raise NotImplementedError
+
+    def forward(self, inputs):
+        return self.act(inputs)
\ No newline at end of file
diff --git a/modules/pytorchocr/modeling/heads/__init__.py b/modules/pytorchocr/modeling/heads/__init__.py
new file mode 100644
index 0000000..ca9fe89
--- /dev/null
+++ b/modules/pytorchocr/modeling/heads/__init__.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
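For reference, the build_head factory defined below is driven by a plain config dict: the 'name' key (popped inside the function) selects the head class, and all remaining keys, together with any extra keyword arguments, are forwarded to that class's constructor. A minimal usage sketch, where the channel and class counts are illustrative only:

    from pytorchocr.modeling.heads import build_head

    config = {'name': 'CTCHead', 'out_channels': 6625}  # 'name' is popped
    head = build_head(config, in_channels=96)           # extra kwargs forwarded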
+ +__all__ = ['build_head'] + + +def build_head(config, **kwargs): + # det head + from .det_db_head import DBHead, PFHeadLocal + from .det_east_head import EASTHead + from .det_sast_head import SASTHead + from .det_pse_head import PSEHead + from .det_fce_head import FCEHead + from .e2e_pg_head import PGHead + + # rec head + from .rec_ctc_head import CTCHead + from .rec_att_head import AttentionHead + from .rec_srn_head import SRNHead + from .rec_nrtr_head import Transformer + from .rec_sar_head import SARHead + from .rec_can_head import CANHead + from .rec_multi_head import MultiHead + + # cls head + from .cls_head import ClsHead + support_dict = [ + 'DBHead', 'PSEHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead', 'AttentionHead', + 'SRNHead', 'PGHead', 'Transformer', 'TableAttentionHead','SARHead', 'FCEHead', + 'CANHead', 'MultiHead', 'PFHeadLocal', + + ] + + from .table_att_head import TableAttentionHead + + module_name = config.pop('name') + assert module_name in support_dict, Exception('head only support {}'.format( + support_dict)) + module_class = eval(module_name)(**config, **kwargs) + return module_class \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/cls_head.py b/modules/pytorchocr/modeling/heads/cls_head.py new file mode 100644 index 0000000..476a212 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/cls_head.py @@ -0,0 +1,28 @@ +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F + +class ClsHead(nn.Module): + """ + Class orientation + Args: + params(dict): super parameters for build Class network + """ + + def __init__(self, in_channels, class_dim, **kwargs): + super(ClsHead, self).__init__() + self.training = False + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear( + in_channels, + class_dim, + bias=True) + + def forward(self, x): + x = self.pool(x) + x = torch.reshape(x, shape=[x.shape[0], x.shape[1]]) + x = self.fc(x) + if not self.training: + x = F.softmax(x, dim=1) + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/det_db_head.py b/modules/pytorchocr/modeling/heads/det_db_head.py new file mode 100644 index 0000000..dbefbb4 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/det_db_head.py @@ -0,0 +1,121 @@ +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +from pytorchocr.modeling.backbones.det_mobilenet_v3 import ConvBNLayer + +class Head(nn.Module): + def __init__(self, in_channels, **kwargs): + super(Head, self).__init__() + self.conv1 = nn.Conv2d( + in_channels=in_channels, + out_channels=in_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.conv_bn1 = nn.BatchNorm2d( + in_channels // 4) + self.relu1 = Activation(act_type='relu') + + self.conv2 = nn.ConvTranspose2d( + in_channels=in_channels // 4, + out_channels=in_channels // 4, + kernel_size=2, + stride=2) + self.conv_bn2 = nn.BatchNorm2d( + in_channels // 4) + self.relu2 = Activation(act_type='relu') + + self.conv3 = nn.ConvTranspose2d( + in_channels=in_channels // 4, + out_channels=1, + kernel_size=2, + stride=2) + + def forward(self, x, return_f=False): + x = self.conv1(x) + x = self.conv_bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.conv_bn2(x) + x = self.relu2(x) + if return_f is True: + f = x + x = self.conv3(x) + x = torch.sigmoid(x) + if return_f is True: + return x, f + return x + + +class DBHead(nn.Module): + """ + Differentiable Binarization (DB) for text detection: + see 
https://arxiv.org/abs/1911.08947 + args: + params(dict): super parameters for build DB network + """ + + def __init__(self, in_channels, k=50, **kwargs): + super(DBHead, self).__init__() + self.k = k + binarize_name_list = [ + 'conv2d_56', 'batch_norm_47', 'conv2d_transpose_0', 'batch_norm_48', + 'conv2d_transpose_1', 'binarize' + ] + thresh_name_list = [ + 'conv2d_57', 'batch_norm_49', 'conv2d_transpose_2', 'batch_norm_50', + 'conv2d_transpose_3', 'thresh' + ] + self.binarize = Head(in_channels, **kwargs)# binarize_name_list) + self.thresh = Head(in_channels, **kwargs)#thresh_name_list) + + def step_function(self, x, y): + return torch.reciprocal(1 + torch.exp(-self.k * (x - y))) + + def forward(self, x): + shrink_maps = self.binarize(x) + if not self.training: + return {'maps': shrink_maps} + + threshold_maps = self.thresh(x) + binary_maps = self.step_function(shrink_maps, threshold_maps) + y = torch.cat([shrink_maps, threshold_maps, binary_maps], dim=1) + return {'maps': y} + +class LocalModule(nn.Module): + def __init__(self, in_c, mid_c, use_distance=True): + super(self.__class__, self).__init__() + self.last_3 = ConvBNLayer(in_c + 1, mid_c, 3, 1, 1, act='relu') + self.last_1 = nn.Conv2d(mid_c, 1, 1, 1, 0) + + def forward(self, x, init_map, distance_map): + outf = torch.cat([init_map, x], dim=1) + # last Conv + out = self.last_1(self.last_3(outf)) + return out + +class PFHeadLocal(DBHead): + def __init__(self, in_channels, k=50, mode='small', **kwargs): + super(PFHeadLocal, self).__init__(in_channels, k, **kwargs) + self.mode = mode + + self.up_conv = nn.Upsample(scale_factor=2, mode="nearest") + if self.mode == 'large': + self.cbn_layer = LocalModule(in_channels // 4, in_channels // 4) + elif self.mode == 'small': + self.cbn_layer = LocalModule(in_channels // 4, in_channels // 8) + + def forward(self, x, targets=None): + shrink_maps, f = self.binarize(x, return_f=True) + base_maps = shrink_maps + cbn_maps = self.cbn_layer(self.up_conv(f), shrink_maps, None) + cbn_maps = F.sigmoid(cbn_maps) + if not self.training: + return {'maps': 0.5 * (base_maps + cbn_maps), 'cbn_maps': cbn_maps} + + threshold_maps = self.thresh(x) + binary_maps = self.step_function(shrink_maps, threshold_maps) + y = torch.cat([cbn_maps, threshold_maps, binary_maps], dim=1) + return {'maps': y, 'distance_maps': cbn_maps, 'cbn_maps': binary_maps} \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/det_east_head.py b/modules/pytorchocr/modeling/heads/det_east_head.py new file mode 100644 index 0000000..0df9c48 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/det_east_head.py @@ -0,0 +1,112 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import nn +# import paddle.nn.functional as F +# from paddle import ParamAttr + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels,) + self.act = act + if act is not None: + self._act = 
Activation(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class EASTHead(nn.Module): + """ + """ + def __init__(self, in_channels, model_name, **kwargs): + super(EASTHead, self).__init__() + self.model_name = model_name + if self.model_name == "large": + num_outputs = [128, 64, 1, 8] + else: + num_outputs = [64, 32, 1, 8] + + self.det_conv1 = ConvBNLayer( + in_channels=in_channels, + out_channels=num_outputs[0], + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="det_head1") + self.det_conv2 = ConvBNLayer( + in_channels=num_outputs[0], + out_channels=num_outputs[1], + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="det_head2") + self.score_conv = ConvBNLayer( + in_channels=num_outputs[1], + out_channels=num_outputs[2], + kernel_size=1, + stride=1, + padding=0, + if_act=False, + act=None, + name="f_score") + self.geo_conv = ConvBNLayer( + in_channels=num_outputs[1], + out_channels=num_outputs[3], + kernel_size=1, + stride=1, + padding=0, + if_act=False, + act=None, + name="f_geo") + + def forward(self, x): + f_det = self.det_conv1(x) + f_det = self.det_conv2(f_det) + f_score = self.score_conv(f_det) + f_score = torch.sigmoid(f_score) + f_geo = self.geo_conv(f_det) + f_geo = (torch.sigmoid(f_geo) - 0.5) * 2 * 800 + + pred = {'f_score': f_score, 'f_geo': f_geo} + return pred \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/det_fce_head.py b/modules/pytorchocr/modeling/heads/det_fce_head.py new file mode 100644 index 0000000..22773c9 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/det_fce_head.py @@ -0,0 +1,82 @@ +""" +This code is refer from: +https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textdet/dense_heads/fce_head.py +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +# from paddle import nn +# from paddle import ParamAttr +# import paddle.nn.functional as F +# from paddle.nn.initializer import Normal +# import paddle +from functools import partial + + +def multi_apply(func, *args, **kwargs): + pfunc = partial(func, **kwargs) if kwargs else func + map_results = map(pfunc, *args) + return tuple(map(list, zip(*map_results))) + + +class FCEHead(nn.Module): + """The class for implementing FCENet head. + FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped Text + Detection. + + [https://arxiv.org/abs/2104.10442] + + Args: + in_channels (int): The number of input channels. + scales (list[int]) : The scale of each layer. + fourier_degree (int) : The maximum Fourier transform degree k. 
+ """ + + def __init__(self, in_channels, fourier_degree=5): + super().__init__() + assert isinstance(in_channels, int) + + self.downsample_ratio = 1.0 + self.in_channels = in_channels + self.fourier_degree = fourier_degree + self.out_channels_cls = 4 + self.out_channels_reg = (2 * self.fourier_degree + 1) * 2 + + self.out_conv_cls = nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.out_channels_cls, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=True) + self.out_conv_reg = nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.out_channels_reg, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=True) + + def forward(self, feats, targets=None): + cls_res, reg_res = multi_apply(self.forward_single, feats) + level_num = len(cls_res) + outs = {} + if not self.training: + for i in range(level_num): + tr_pred = F.softmax(cls_res[i][:, 0:2, :, :], dim=1) + tcl_pred = F.softmax(cls_res[i][:, 2:, :, :], dim=1) + outs['level_{}'.format(i)] = torch.cat( + [tr_pred, tcl_pred, reg_res[i]], dim=1) + else: + preds = [[cls_res[i], reg_res[i]] for i in range(level_num)] + outs['levels'] = preds + return outs + + def forward_single(self, x): + cls_predict = self.out_conv_cls(x) + reg_predict = self.out_conv_reg(x) + return cls_predict, reg_predict diff --git a/modules/pytorchocr/modeling/heads/det_pse_head.py b/modules/pytorchocr/modeling/heads/det_pse_head.py new file mode 100644 index 0000000..c032212 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/det_pse_head.py @@ -0,0 +1,25 @@ +""" +This code is refer from: +https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py +""" + +# from paddle import nn +from torch import nn + + +class PSEHead(nn.Module): + def __init__(self, in_channels, hidden_dim=256, out_channels=7, **kwargs): + super(PSEHead, self).__init__() + self.conv1 = nn.Conv2d( + in_channels, hidden_dim, kernel_size=3, stride=1, padding=1) + self.bn1 = nn.BatchNorm2d(hidden_dim) + self.relu1 = nn.ReLU() + + self.conv2 = nn.Conv2d( + hidden_dim, out_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x, **kwargs): + out = self.conv1(x) + out = self.relu1(self.bn1(out)) + out = self.conv2(out) + return {'maps': out} diff --git a/modules/pytorchocr/modeling/heads/det_sast_head.py b/modules/pytorchocr/modeling/heads/det_sast_head.py new file mode 100644 index 0000000..5b104e6 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/det_sast_head.py @@ -0,0 +1,118 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import nn +# import paddle.nn.functional as F +# from paddle import ParamAttr + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels,) + self.act = act + if act is not None: + self._act = Activation(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class SAST_Header1(nn.Module): + def 
__init__(self, in_channels, **kwargs): + super(SAST_Header1, self).__init__() + out_channels = [64, 64, 128] + self.score_conv = nn.Sequential( + ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_score1'), + ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_score2'), + ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_score3'), + ConvBNLayer(out_channels[2], 1, 3, 1, act=None, name='f_score4') + ) + self.border_conv = nn.Sequential( + ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_border1'), + ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_border2'), + ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_border3'), + ConvBNLayer(out_channels[2], 4, 3, 1, act=None, name='f_border4') + ) + + def forward(self, x): + f_score = self.score_conv(x) + f_score = torch.sigmoid(f_score) + f_border = self.border_conv(x) + return f_score, f_border + + +class SAST_Header2(nn.Module): + def __init__(self, in_channels, **kwargs): + super(SAST_Header2, self).__init__() + out_channels = [64, 64, 128] + self.tvo_conv = nn.Sequential( + ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tvo1'), + ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tvo2'), + ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tvo3'), + ConvBNLayer(out_channels[2], 8, 3, 1, act=None, name='f_tvo4') + ) + self.tco_conv = nn.Sequential( + ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tco1'), + ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tco2'), + ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tco3'), + ConvBNLayer(out_channels[2], 2, 3, 1, act=None, name='f_tco4') + ) + + def forward(self, x): + f_tvo = self.tvo_conv(x) + f_tco = self.tco_conv(x) + return f_tvo, f_tco + + +class SASTHead(nn.Module): + """ + """ + def __init__(self, in_channels, **kwargs): + super(SASTHead, self).__init__() + + self.head1 = SAST_Header1(in_channels) + self.head2 = SAST_Header2(in_channels) + + def forward(self, x): + f_score, f_border = self.head1(x) + f_tvo, f_tco = self.head2(x) + + predicts = {} + predicts['f_score'] = f_score + predicts['f_border'] = f_border + predicts['f_tvo'] = f_tvo + predicts['f_tco'] = f_tco + return predicts \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/e2e_pg_head.py b/modules/pytorchocr/modeling/heads/e2e_pg_head.py new file mode 100644 index 0000000..e3e2130 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/e2e_pg_head.py @@ -0,0 +1,234 @@ + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d(out_channels) + self.act = act + if self.act is not None: + self._act = Activation(act_type=self.act, inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not 
None: + x = self._act(x) + return x + + +class PGHead(nn.Module): + """ + """ + + def __init__(self, in_channels, **kwargs): + super(PGHead, self).__init__() + self.conv_f_score1 = ConvBNLayer( + in_channels=in_channels, + out_channels=64, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_score{}".format(1)) + self.conv_f_score2 = ConvBNLayer( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=1, + padding=1, + act='relu', + name="conv_f_score{}".format(2)) + self.conv_f_score3 = ConvBNLayer( + in_channels=64, + out_channels=128, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_score{}".format(3)) + + self.conv1 = nn.Conv2d( + in_channels=128, + out_channels=1, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False) + + self.conv_f_boder1 = ConvBNLayer( + in_channels=in_channels, + out_channels=64, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_boder{}".format(1)) + self.conv_f_boder2 = ConvBNLayer( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=1, + padding=1, + act='relu', + name="conv_f_boder{}".format(2)) + self.conv_f_boder3 = ConvBNLayer( + in_channels=64, + out_channels=128, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_boder{}".format(3)) + self.conv2 = nn.Conv2d( + in_channels=128, + out_channels=4, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False) + self.conv_f_char1 = ConvBNLayer( + in_channels=in_channels, + out_channels=128, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_char{}".format(1)) + self.conv_f_char2 = ConvBNLayer( + in_channels=128, + out_channels=128, + kernel_size=3, + stride=1, + padding=1, + act='relu', + name="conv_f_char{}".format(2)) + self.conv_f_char3 = ConvBNLayer( + in_channels=128, + out_channels=256, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_char{}".format(3)) + self.conv_f_char4 = ConvBNLayer( + in_channels=256, + out_channels=256, + kernel_size=3, + stride=1, + padding=1, + act='relu', + name="conv_f_char{}".format(4)) + self.conv_f_char5 = ConvBNLayer( + in_channels=256, + out_channels=256, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_char{}".format(5)) + self.conv3 = nn.Conv2d( + in_channels=256, + out_channels=37, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False) + + self.conv_f_direc1 = ConvBNLayer( + in_channels=in_channels, + out_channels=64, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_direc{}".format(1)) + self.conv_f_direc2 = ConvBNLayer( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=1, + padding=1, + act='relu', + name="conv_f_direc{}".format(2)) + self.conv_f_direc3 = ConvBNLayer( + in_channels=64, + out_channels=128, + kernel_size=1, + stride=1, + padding=0, + act='relu', + name="conv_f_direc{}".format(3)) + self.conv4 = nn.Conv2d( + in_channels=128, + out_channels=2, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False) + + def forward(self, x): + f_score = self.conv_f_score1(x) + f_score = self.conv_f_score2(f_score) + f_score = self.conv_f_score3(f_score) + f_score = self.conv1(f_score) + f_score = torch.sigmoid(f_score) + + # f_border + f_border = self.conv_f_boder1(x) + f_border = self.conv_f_boder2(f_border) + f_border = self.conv_f_boder3(f_border) + f_border = self.conv2(f_border) + + f_char = self.conv_f_char1(x) + f_char = self.conv_f_char2(f_char) + f_char = self.conv_f_char3(f_char) + f_char = self.conv_f_char4(f_char) + f_char = 
self.conv_f_char5(f_char) + f_char = self.conv3(f_char) + + f_direction = self.conv_f_direc1(x) + f_direction = self.conv_f_direc2(f_direction) + f_direction = self.conv_f_direc3(f_direction) + f_direction = self.conv4(f_direction) + + predicts = {} + predicts['f_score'] = f_score + predicts['f_border'] = f_border + predicts['f_char'] = f_char + predicts['f_direction'] = f_direction + return predicts diff --git a/modules/pytorchocr/modeling/heads/multiheadAttention.py b/modules/pytorchocr/modeling/heads/multiheadAttention.py new file mode 100644 index 0000000..cbd887c --- /dev/null +++ b/modules/pytorchocr/modeling/heads/multiheadAttention.py @@ -0,0 +1,150 @@ +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn import Linear +from torch.nn.init import xavier_uniform_ + + +class MultiheadAttention(nn.Module): + """Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model + num_heads: parallel attention layers, or heads + + """ + + def __init__(self, + embed_dim, + num_heads, + dropout=0., + bias=True, + add_bias_kv=False, + add_zero_attn=False): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + self.scaling = self.head_dim**-0.5 + self.out_proj = Linear(embed_dim, embed_dim, bias=bias) + self._reset_parameters() + self.conv1 = torch.nn.Conv2d( + in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) + self.conv2 = torch.nn.Conv2d( + in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) + self.conv3 = torch.nn.Conv2d( + in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1)) + + def _reset_parameters(self): + xavier_uniform_(self.out_proj.weight) + + def forward(self, + query, + key, + value, + key_padding_mask=None, + incremental_state=None, + attn_mask=None): + """ + Inputs of forward function + query: [target length, batch size, embed dim] + key: [sequence length, batch size, embed dim] + value: [sequence length, batch size, embed dim] + key_padding_mask: if True, mask padding based on batch size + incremental_state: if provided, previous time steps are cashed + need_weights: output attn_output_weights + static_kv: key and value are static + + Outputs of forward function + attn_output: [target length, batch size, embed dim] + attn_output_weights: [batch size, target length, sequence length] + """ + q_shape = query.shape + src_shape = key.shape + q = self._in_proj_q(query) + k = self._in_proj_k(key) + v = self._in_proj_v(value) + q *= self.scaling + # q = paddle.transpose( + # paddle.reshape( + # q, [q_shape[0], q_shape[1], self.num_heads, self.head_dim]), + # [1, 2, 0, 3]) + q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self.head_dim)) + q = q.permute(1, 2, 0, 3) + # k = paddle.transpose( + # paddle.reshape( + # k, [src_shape[0], q_shape[1], self.num_heads, self.head_dim]), + # [1, 2, 0, 3]) + k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) + k = k.permute(1, 2, 0, 3) + # v = paddle.transpose( + # paddle.reshape( + # v, [src_shape[0], q_shape[1], self.num_heads, self.head_dim]), + # [1, 2, 
0, 3]) + v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim)) + v = v.permute(1, 2, 0, 3) + if key_padding_mask is not None: + assert key_padding_mask.shape[0] == q_shape[1] + assert key_padding_mask.shape[1] == src_shape[0] + attn_output_weights = torch.matmul(q, + k.permute(0, 1, 3, 2)) + if attn_mask is not None: + attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0) + attn_output_weights += attn_mask + if key_padding_mask is not None: + attn_output_weights = torch.reshape( + attn_output_weights, + [q_shape[1], self.num_heads, q_shape[0], src_shape[0]]) + key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2) + key = key.type(torch.float32) + y = torch.full( + size=key.shape, fill_value=float("-Inf"), dtype=torch.float32) + y = torch.where(key == 0., key, y) + attn_output_weights += y + attn_output_weights = F.softmax( + attn_output_weights.type(torch.float32), + dim=-1, + dtype=torch.float32 if attn_output_weights.dtype == torch.float16 + else attn_output_weights.dtype) + attn_output_weights = F.dropout( + attn_output_weights, p=self.dropout, training=self.training) + + attn_output = torch.matmul(attn_output_weights, v) + attn_output = torch.reshape( + attn_output.permute(2, 0, 1, 3), + [q_shape[0], q_shape[1], self.embed_dim]) + attn_output = self.out_proj(attn_output) + + return attn_output + + def _in_proj_q(self, query): + query = query.permute(1, 2, 0) + query = torch.unsqueeze(query, dim=2) + res = self.conv1(query) + res = torch.squeeze(res, dim=2) + res = res.permute(2, 0, 1) + return res + + def _in_proj_k(self, key): + key = key.permute(1, 2, 0) + key = torch.unsqueeze(key, dim=2) + res = self.conv2(key) + res = torch.squeeze(res, dim=2) + res = res.permute(2, 0, 1) + return res + + def _in_proj_v(self, value): + value = value.permute(1, 2, 0) #(1, 2, 0) + value = torch.unsqueeze(value, dim=2) + res = self.conv3(value) + res = torch.squeeze(res, dim=2) + res = res.permute(2, 0, 1) + return res diff --git a/modules/pytorchocr/modeling/heads/rec_att_head.py b/modules/pytorchocr/modeling/heads/rec_att_head.py new file mode 100644 index 0000000..a057c52 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_att_head.py @@ -0,0 +1,190 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + + +class AttentionHead(nn.Module): + def __init__(self, in_channels, out_channels, hidden_size, **kwargs): + super(AttentionHead, self).__init__() + self.input_size = in_channels + self.hidden_size = hidden_size + self.num_classes = out_channels + + self.attention_cell = AttentionGRUCell( + in_channels, hidden_size, out_channels, use_gru=False) + self.generator = nn.Linear(hidden_size, out_channels) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char.type(torch.int64), onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None, batch_max_length=25): + batch_size = inputs.size()[0] + num_steps = batch_max_length + + hidden = torch.zeros((batch_size, self.hidden_size)) + output_hiddens = [] + + if targets is not None: + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets[:, i], onehot_dim=self.num_classes) + (outputs, hidden), alpha = self.attention_cell(hidden, inputs, + char_onehots) + output_hiddens.append(torch.unsqueeze(outputs, dim=1)) + output = 
torch.cat(output_hiddens, dim=1) + probs = self.generator(output) + + else: + targets = torch.zeros([batch_size], dtype=torch.int32) + probs = None + char_onehots = None + outputs = None + alpha = None + + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets, onehot_dim=self.num_classes) + (outputs, hidden), alpha = self.attention_cell(hidden, inputs, + char_onehots) + probs_step = self.generator(outputs) + if probs is None: + probs = torch.unsqueeze(probs_step, dim=1) + else: + probs = torch.cat( + [probs, torch.unsqueeze( + probs_step, dim=1)], dim=1) + next_input = probs_step.argmax(dim=1) + targets = next_input + + return probs + + +class AttentionGRUCell(nn.Module): + def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionGRUCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias=False) + + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size, bias=True) + + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden), dim=1) + + res = torch.add(batch_H_proj, prev_hidden_proj) + res = torch.tanh(res) + e = self.score(res) + + alpha = F.softmax(e, dim=1) + alpha = alpha.permute(0, 2, 1) + context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1) + concat_context = torch.cat([context, char_onehots.float()], 1) + + cur_hidden = self.rnn(concat_context, prev_hidden) + + return (cur_hidden, cur_hidden), alpha + + +class AttentionLSTM(nn.Module): + def __init__(self, in_channels, out_channels, hidden_size, **kwargs): + super(AttentionLSTM, self).__init__() + self.input_size = in_channels + self.hidden_size = hidden_size + self.num_classes = out_channels + + self.attention_cell = AttentionLSTMCell( + in_channels, hidden_size, out_channels, use_gru=False) + self.generator = nn.Linear(hidden_size, out_channels) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char.type(torch.int64), onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None, batch_max_length=25): + batch_size = inputs.shape[0] + num_steps = batch_max_length + + hidden = (torch.zeros((batch_size, self.hidden_size)), torch.zeros( + (batch_size, self.hidden_size))) + output_hiddens = [] + + if targets is not None: + for i in range(num_steps): + # one-hot vectors for a i-th char + char_onehots = self._char_to_onehot( + targets[:, i], onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + + hidden = (hidden[1][0], hidden[1][1]) + output_hiddens.append(torch.unsqueeze(hidden[0], dim=1)) + output = torch.cat(output_hiddens, dim=1) + probs = self.generator(output) + + else: + targets = torch.zeros([batch_size], dtype=torch.int32) + probs = None + + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets, onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + probs_step = self.generator(hidden[0]) + hidden = (hidden[1][0], hidden[1][1]) + if probs is None: + probs = torch.unsqueeze(probs_step, dim=1) + else: + probs = torch.cat( + [probs, torch.unsqueeze( + probs_step, dim=1)], dim=1) + + next_input = probs_step.argmax(dim=1) + + targets = next_input + + return probs + + +class AttentionLSTMCell(nn.Module): + def __init__(self, 
input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionLSTMCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias=False) + if not use_gru: + self.rnn = nn.LSTMCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + else: + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden[0]), dim=1) + res = torch.add(batch_H_proj, prev_hidden_proj) + res = torch.tanh(res) + e = self.score(res) + + alpha = F.softmax(e, dim=1) + alpha = alpha.permute(0, 2, 1) + context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1) + concat_context = torch.cat([context, char_onehots.float()], 1) + + cur_hidden = self.rnn(concat_context, prev_hidden) + + return cur_hidden, alpha diff --git a/modules/pytorchocr/modeling/heads/rec_can_head.py b/modules/pytorchocr/modeling/heads/rec_can_head.py new file mode 100644 index 0000000..4d097f5 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_can_head.py @@ -0,0 +1,288 @@ +""" +This code is refer from: +https://github.com/LBH1024/CAN/models/can.py +https://github.com/LBH1024/CAN/models/counting.py +https://github.com/LBH1024/CAN/models/decoder.py +https://github.com/LBH1024/CAN/models/attention.py + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch.nn as nn +import torch +import math +''' +Counting Module +''' + + +class ChannelAtt(nn.Module): + def __init__(self, channel, reduction): + super(ChannelAtt, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), + nn.ReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid()) + + def forward(self, x): + b, c, _, _ = x.shape + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) + return x * y + + +class CountingDecoder(nn.Module): + def __init__(self, in_channel, out_channel, kernel_size): + super(CountingDecoder, self).__init__() + self.in_channel = in_channel + self.out_channel = out_channel + + self.trans_layer = nn.Sequential( + nn.Conv2d( + self.in_channel, + 512, + kernel_size=kernel_size, + padding=kernel_size // 2, + bias=False), + nn.BatchNorm2d(512)) + + self.channel_att = ChannelAtt(512, 16) + + self.pred_layer = nn.Sequential( + nn.Conv2d( + 512, self.out_channel, kernel_size=1, bias=False), + nn.Sigmoid()) + + def forward(self, x, mask): + b, _, h, w = x.shape + x = self.trans_layer(x) + x = self.channel_att(x) + x = self.pred_layer(x) + + if mask is not None: + x = x * mask + x = x.view(b, self.out_channel, -1) + x1 = torch.sum(x, dim=-1) + + return x1, x.view(b, self.out_channel, h, w) + + +''' +Attention Decoder +''' + + +class PositionEmbeddingSine(nn.Module): + def __init__(self, + num_pos_feats=64, + temperature=10000, + normalize=False, + scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, x, mask): + y_embed = mask.cumsum(1, dtype=torch.float32) + x_embed = mask.cumsum(2, 
dtype=torch.float32) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + # dim_d = paddle.expand(paddle.to_tensor(2), dim_t.shape) + # dim_t = self.temperature**(2 * (dim_t / dim_d).astype('int64') / + # self.num_pos_feats) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = torch.unsqueeze(x_embed, 3) / dim_t + pos_y = torch.unsqueeze(y_embed, 3) / dim_t + + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + + return pos + + +class AttDecoder(nn.Module): + def __init__(self, ratio, is_train, input_size, hidden_size, + encoder_out_channel, dropout, dropout_ratio, word_num, + counting_decoder_out_channel, attention): + super(AttDecoder, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.out_channel = encoder_out_channel + self.attention_dim = attention['attention_dim'] + self.dropout_prob = dropout + self.ratio = ratio + self.word_num = word_num + + self.counting_num = counting_decoder_out_channel + self.is_train = is_train + + self.init_weight = nn.Linear(self.out_channel, self.hidden_size) + self.embedding = nn.Embedding(self.word_num, self.input_size) + self.word_input_gru = nn.GRUCell(self.input_size, self.hidden_size) + self.word_attention = Attention(hidden_size, attention['attention_dim']) + + self.encoder_feature_conv = nn.Conv2d( + self.out_channel, + self.attention_dim, + kernel_size=attention['word_conv_kernel'], + padding=attention['word_conv_kernel'] // 2) + + self.word_state_weight = nn.Linear(self.hidden_size, self.hidden_size) + self.word_embedding_weight = nn.Linear(self.input_size, + self.hidden_size) + self.word_context_weight = nn.Linear(self.out_channel, self.hidden_size) + self.counting_context_weight = nn.Linear(self.counting_num, + self.hidden_size) + self.word_convert = nn.Linear(self.hidden_size, self.word_num) + + if dropout: + self.dropout = nn.Dropout(dropout_ratio) + + def forward(self, cnn_features, labels, counting_preds, images_mask): + if self.is_train: + _, num_steps = labels.shape + else: + num_steps = 36 + + batch_size, _, height, width = cnn_features.shape + images_mask = images_mask[:, :, ::self.ratio, ::self.ratio] + + word_probs = torch.zeros((batch_size, num_steps, self.word_num)).to(device=cnn_features.device) + word_alpha_sum = torch.zeros((batch_size, 1, height, width)).to(device=cnn_features.device) + + hidden = self.init_hidden(cnn_features, images_mask) + counting_context_weighted = self.counting_context_weight(counting_preds) + cnn_features_trans = self.encoder_feature_conv(cnn_features) + + position_embedding = PositionEmbeddingSine(256, normalize=True) + pos = position_embedding(cnn_features_trans, images_mask[:, 0, :, :]) + + cnn_features_trans = cnn_features_trans + pos + + word = torch.ones([batch_size]).long().to(device=cnn_features.device) # init word as sos + for i in range(num_steps): + word_embedding = self.embedding(word) + hidden = self.word_input_gru(word_embedding, hidden) + word_context_vec, _, word_alpha_sum = self.word_attention( + cnn_features, cnn_features_trans, hidden, word_alpha_sum, + images_mask) + + current_state = self.word_state_weight(hidden) + 
word_weighted_embedding = self.word_embedding_weight(word_embedding) + word_context_weighted = self.word_context_weight(word_context_vec) + + if self.dropout_prob: + word_out_state = self.dropout( + current_state + word_weighted_embedding + + word_context_weighted + counting_context_weighted) + else: + word_out_state = current_state + word_weighted_embedding + word_context_weighted + counting_context_weighted + + word_prob = self.word_convert(word_out_state) + word_probs[:, i] = word_prob + + if self.is_train: + word = labels[:, i] + else: + word = word_prob.argmax(1) + word = torch.mul( + word, labels[:, i] + ) # labels are oneslike tensor in infer/predict mode, torch.multiply + + return word_probs + + def init_hidden(self, features, feature_mask): + average = torch.sum(torch.sum(features * feature_mask, dim=-1), + dim=-1) / torch.sum( + (torch.sum(feature_mask, dim=-1)), dim=-1) + average = self.init_weight(average) + return torch.tanh(average) + + +''' +Attention Module +''' + + +class Attention(nn.Module): + def __init__(self, hidden_size, attention_dim): + super(Attention, self).__init__() + self.hidden = hidden_size + self.attention_dim = attention_dim + self.hidden_weight = nn.Linear(self.hidden, self.attention_dim) + self.attention_conv = nn.Conv2d( + 1, 512, kernel_size=11, padding=5, bias=False) + self.attention_weight = nn.Linear( + 512, self.attention_dim, bias=False) + self.alpha_convert = nn.Linear(self.attention_dim, 1) + + def forward(self, + cnn_features, + cnn_features_trans, + hidden, + alpha_sum, + image_mask=None): + query = self.hidden_weight(hidden) + alpha_sum_trans = self.attention_conv(alpha_sum) + coverage_alpha = self.attention_weight(alpha_sum_trans.permute(0, 2, 3, 1)) + alpha_score = torch.tanh( + query[:, None, None, :] + coverage_alpha + cnn_features_trans.permute(0, 2, 3, 1) + ) + energy = self.alpha_convert(alpha_score) + energy = energy - energy.max() + energy_exp = torch.exp(torch.squeeze(energy, -1)) + + if image_mask is not None: + energy_exp = energy_exp * torch.squeeze(image_mask, 1) + alpha = energy_exp / (energy_exp.sum(-1).sum(-1)[:,None,None] + 1e-10) + alpha_sum = torch.unsqueeze(alpha, 1) + alpha_sum + context_vector = torch.sum( + torch.sum((torch.unsqueeze(alpha, 1) * cnn_features), -1), -1) + + return context_vector, alpha, alpha_sum + + +class CANHead(nn.Module): + def __init__(self, in_channel, out_channel, ratio, attdecoder, **kwargs): + super(CANHead, self).__init__() + + self.in_channel = in_channel + self.out_channel = out_channel + + self.counting_decoder1 = CountingDecoder(self.in_channel, + self.out_channel, 3) # mscm + self.counting_decoder2 = CountingDecoder(self.in_channel, + self.out_channel, 5) + + self.decoder = AttDecoder(ratio, **attdecoder) + + self.ratio = ratio + + def forward(self, inputs, targets=None): + cnn_features, images_mask, labels = inputs + + counting_mask = images_mask[:, :, ::self.ratio, ::self.ratio] + counting_preds1, _ = self.counting_decoder1(cnn_features, counting_mask) + counting_preds2, _ = self.counting_decoder2(cnn_features, counting_mask) + counting_preds = (counting_preds1 + counting_preds2) / 2 + + word_probs = self.decoder(cnn_features, labels, counting_preds, + images_mask) + return word_probs, counting_preds, counting_preds1, counting_preds2 diff --git a/modules/pytorchocr/modeling/heads/rec_ctc_head.py b/modules/pytorchocr/modeling/heads/rec_ctc_head.py new file mode 100644 index 0000000..f4f870b --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_ctc_head.py @@ -0,0 +1,53 @@ +import 
os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F + +class CTCHead(nn.Module): + def __init__(self, + in_channels, + out_channels=6625, + fc_decay=0.0004, + mid_channels=None, + return_feats=False, + **kwargs): + super(CTCHead, self).__init__() + if mid_channels is None: + self.fc = nn.Linear( + in_channels, + out_channels, + bias=True,) + else: + self.fc1 = nn.Linear( + in_channels, + mid_channels, + bias=True, + ) + self.fc2 = nn.Linear( + mid_channels, + out_channels, + bias=True, + ) + + self.out_channels = out_channels + self.mid_channels = mid_channels + self.return_feats = return_feats + + + def forward(self, x, labels=None): + if self.mid_channels is None: + predicts = self.fc(x) + else: + x = self.fc1(x) + predicts = self.fc2(x) + + if self.return_feats: + result = (x, predicts) + else: + result = predicts + + if not self.training: + predicts = F.softmax(predicts, dim=2) + result = predicts + + return result \ No newline at end of file diff --git a/modules/pytorchocr/modeling/heads/rec_multi_head.py b/modules/pytorchocr/modeling/heads/rec_multi_head.py new file mode 100644 index 0000000..a79714d --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_multi_head.py @@ -0,0 +1,88 @@ +import torch +import torch.nn as nn + +from pytorchocr.modeling.necks.rnn import Im2Seq, SequenceEncoder +from .rec_nrtr_head import Transformer +from .rec_ctc_head import CTCHead +from .rec_sar_head import SARHead + +class FCTranspose(nn.Module): + def __init__(self, in_channels, out_channels, only_transpose=False): + super().__init__() + self.only_transpose = only_transpose + if not self.only_transpose: + self.fc = nn.Linear(in_channels, out_channels, bias=False) + + def forward(self, x): + if self.only_transpose: + return x.permute([0, 2, 1]) + else: + return self.fc(x.permute([0, 2, 1])) + + +class MultiHead(nn.Module): + def __init__(self, in_channels, out_channels_list, **kwargs): + super().__init__() + self.head_list = kwargs.pop('head_list') + + self.gtc_head = 'sar' + assert len(self.head_list) >= 2 + for idx, head_name in enumerate(self.head_list): + name = list(head_name)[0] + if name == 'SARHead': + pass + # # sar head + # sar_args = self.head_list[idx][name] + # self.sar_head = eval(name)(in_channels=in_channels, \ + # out_channels=out_channels_list['SARLabelDecode'], **sar_args) + elif name == 'NRTRHead': + pass + # gtc_args = self.head_list[idx][name] + # max_text_length = gtc_args.get('max_text_length', 25) + # nrtr_dim = gtc_args.get('nrtr_dim', 256) + # num_decoder_layers = gtc_args.get('num_decoder_layers', 4) + # self.before_gtc = nn.Sequential( + # nn.Flatten(2), FCTranspose(in_channels, nrtr_dim)) + # self.gtc_head = Transformer( + # d_model=nrtr_dim, + # nhead=nrtr_dim // 32, + # num_encoder_layers=-1, + # beam_size=-1, + # num_decoder_layers=num_decoder_layers, + # max_len=max_text_length, + # dim_feedforward=nrtr_dim * 4, + # out_channels=out_channels_list['NRTRLabelDecode']) + elif name == 'CTCHead': + # ctc neck + self.encoder_reshape = Im2Seq(in_channels) + neck_args = self.head_list[idx][name]['Neck'] + encoder_type = neck_args.pop('name') + self.ctc_encoder = SequenceEncoder(in_channels=in_channels, \ + encoder_type=encoder_type, **neck_args) + # ctc head + head_args = self.head_list[idx][name].get('Head', {}) + if head_args is None: + head_args = {} + self.ctc_head = eval(name)(in_channels=self.ctc_encoder.out_channels, \ + out_channels=out_channels_list['CTCLabelDecode'], **head_args) + else: + raise NotImplementedError( + '{} is not 
supported in MultiHead yet'.format(name)) + + def forward(self, x, data=None): + ctc_encoder = self.ctc_encoder(x) + ctc_out = self.ctc_head(ctc_encoder) + head_out = dict() + head_out['ctc'] = ctc_out + head_out['res'] = ctc_out + head_out['ctc_neck'] = ctc_encoder + # eval mode + if not self.training: + return ctc_out + if self.gtc_head == 'sar': + sar_out = self.sar_head(x, data[1:])['res'] + head_out['sar'] = sar_out + else: + gtc_out = self.gtc_head(self.before_gtc(x), data[1:])['res'] + head_out['nrtr'] = gtc_out + return head_out diff --git a/modules/pytorchocr/modeling/heads/rec_nrtr_head.py b/modules/pytorchocr/modeling/heads/rec_nrtr_head.py new file mode 100644 index 0000000..13cb09a --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_nrtr_head.py @@ -0,0 +1,812 @@ +import math +import torch +import copy +from torch import nn +import torch.nn.functional as F +from torch.nn import ModuleList as LayerList +from torch.nn.init import xavier_uniform_ +from torch.nn import Dropout, LayerNorm, Conv2d +import numpy as np +from pytorchocr.modeling.heads.multiheadAttention import MultiheadAttention +from torch.nn.init import xavier_normal_ + + +class Transformer(nn.Module): + """A transformer model. User is able to modify the attributes as needed. The architechture + is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, + Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and + Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information + Processing Systems, pages 6000-6010. + + Args: + d_model: the number of expected features in the encoder/decoder inputs (default=512). + nhead: the number of heads in the multiheadattention models (default=8). + num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). + num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + custom_encoder: custom encoder (default=None). + custom_decoder: custom decoder (default=None). 
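+        beam_size: beam width used at inference; 0 selects greedy decoding (default=0).
+        max_len: the maximum decoded sequence length at inference (default=25).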
+ + """ + + def __init__(self, + d_model=512, + nhead=8, + num_encoder_layers=6, + beam_size=0, + num_decoder_layers=6, + max_len=25, + dim_feedforward=1024, + attention_dropout_rate=0.0, + residual_dropout_rate=0.1, + custom_encoder=None, + custom_decoder=None, + in_channels=0, + out_channels=0, + scale_embedding=True): + super(Transformer, self).__init__() + self.out_channels = out_channels # out_channels + 1 + self.max_len = max_len + self.embedding = Embeddings( + d_model=d_model, + vocab=self.out_channels, + padding_idx=0, + scale_embedding=scale_embedding) + self.positional_encoding = PositionalEncoding( + dropout=residual_dropout_rate, + dim=d_model, ) + if custom_encoder is not None: + self.encoder = custom_encoder + else: + if num_encoder_layers > 0: + encoder_layer = TransformerEncoderLayer( + d_model, nhead, dim_feedforward, attention_dropout_rate, + residual_dropout_rate) + self.encoder = TransformerEncoder(encoder_layer, + num_encoder_layers) + else: + self.encoder = None + + if custom_decoder is not None: + self.decoder = custom_decoder + else: + decoder_layer = TransformerDecoderLayer( + d_model, nhead, dim_feedforward, attention_dropout_rate, + residual_dropout_rate) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers) + + self._reset_parameters() + self.beam_size = beam_size + self.d_model = d_model + self.nhead = nhead + self.tgt_word_prj = nn.Linear( + d_model, self.out_channels, bias=False) + w0 = np.random.normal(0.0, d_model ** -0.5, + (self.out_channels, d_model)).astype(np.float32) + self.tgt_word_prj.weight.data = torch.from_numpy(w0) + self.apply(self._init_weights) + + def _init_weights(self, m): + + if isinstance(m, nn.Conv2d): + xavier_normal_(m.weight) + if m.bias is not None: + torch.nn.init.zeros_(m.bias) + + def forward_train(self, src, tgt): + tgt = tgt[:, :-1] + + tgt_key_padding_mask = self.generate_padding_mask(tgt) + tgt = self.embedding(tgt).permute(1, 0, 2) + tgt = self.positional_encoding(tgt) + tgt_mask = self.generate_square_subsequent_mask(tgt.shape[0], tgt.device) + + if self.encoder is not None: + src = self.positional_encoding(src.permute(1, 0, 2)) + memory = self.encoder(src) + else: + memory = src.squeeze(2).permute(2, 0, 1) + output = self.decoder( + tgt, + memory, + tgt_mask=tgt_mask, + memory_mask=None, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=None) + output = output.permute(1, 0, 2) + logit = self.tgt_word_prj(output) + return logit + + def forward(self, src, targets=None): + """Take in and process masked source/target sequences. + Args: + src: the sequence to the encoder (required). + tgt: the sequence to the decoder (required). + Shape: + - src: :math:`(S, N, E)`. + - tgt: :math:`(T, N, E)`. 
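+            - output: :math:`(N, T, C)` logits over the vocabulary in training
+              mode; a ``[dec_seq, dec_prob]`` pair of decoded ids and
+              probabilities in inference mode.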
+ Examples: + >>> output = transformer_model(src, tgt) + """ + + if self.training: + max_len = targets[1].max() + tgt = targets[0][:, :2 + max_len] + return self.forward_train(src, tgt) + else: + if self.beam_size > 0: + return self.forward_beam(src) + else: + return self.forward_test(src) + + def forward_test(self, src): + bs = src.shape[0] + if self.encoder is not None: + src = self.positional_encoding(src.permute(1, 0, 2)) + memory = self.encoder(src) + else: + memory = torch.squeeze(src, 2).permute(2, 0, 1) + dec_seq = torch.full((bs, 1), 2, dtype=torch.int64) + dec_prob = torch.full((bs, 1), 1., dtype=torch.float32) + for len_dec_seq in range(1, 25): + dec_seq_embed = self.embedding(dec_seq).permute(1, 0, 2) + dec_seq_embed = self.positional_encoding(dec_seq_embed) + tgt_mask = self.generate_square_subsequent_mask( + dec_seq_embed.shape[0]) + output = self.decoder( + dec_seq_embed, + memory, + tgt_mask=tgt_mask, + memory_mask=None, + tgt_key_padding_mask=None, + memory_key_padding_mask=None) + dec_output = output.permute(1, 0, 2) + dec_output = dec_output[:, -1, :] + tgt_word_prj = self.tgt_word_prj(dec_output) + word_prob = F.softmax(tgt_word_prj, dim=1) + preds_idx = word_prob.argmax(dim=1) + if torch.equal( + preds_idx, + torch.full( + preds_idx.shape, 3, dtype=torch.int64)): + break + preds_prob = torch.max(word_prob, dim=1).values + dec_seq = torch.cat( + [dec_seq, torch.reshape(preds_idx, (-1, 1))], dim=1) + dec_prob = torch.cat( + [dec_prob, torch.reshape(preds_prob, (-1, 1))], dim=1) + return [dec_seq, dec_prob] + + def forward_beam(self, images): + ''' Translation work in one batch ''' + + def get_inst_idx_to_tensor_position_map(inst_idx_list): + ''' Indicate the position of an instance in a tensor. ''' + return { + inst_idx: tensor_position + for tensor_position, inst_idx in enumerate(inst_idx_list) + } + + def collect_active_part(beamed_tensor, curr_active_inst_idx, + n_prev_active_inst, n_bm): + ''' Collect tensor parts associated to active instances. ''' + + beamed_tensor_shape = beamed_tensor.shape + n_curr_active_inst = len(curr_active_inst_idx) + new_shape = (n_curr_active_inst * n_bm, beamed_tensor_shape[1], + beamed_tensor_shape[2]) + + beamed_tensor = beamed_tensor.reshape([n_prev_active_inst, -1]) + beamed_tensor = beamed_tensor.index_select( + curr_active_inst_idx, axis=0) + beamed_tensor = beamed_tensor.reshape(new_shape) + + return beamed_tensor + + def collate_active_info(src_enc, inst_idx_to_position_map, + active_inst_idx_list): + # Sentences which are still active are collected, + # so the decoder will not run on completed sentences. 
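+            # inst_idx_to_position_map maps an instance id to its slot in the
+            # (batch * beam)-sized encoder tensor, so finished instances can
+            # be dropped without re-running the encoder.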
+ + n_prev_active_inst = len(inst_idx_to_position_map) + active_inst_idx = [ + inst_idx_to_position_map[k] for k in active_inst_idx_list + ] + active_inst_idx = torch.tensor(active_inst_idx, dtype=torch.int64) + active_src_enc = collect_active_part( + src_enc.permute(1, 0, 2), active_inst_idx, + n_prev_active_inst, n_bm).permute(1, 0, 2) + active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map( + active_inst_idx_list) + return active_src_enc, active_inst_idx_to_position_map + + def beam_decode_step(inst_dec_beams, len_dec_seq, enc_output, + inst_idx_to_position_map, n_bm, + memory_key_padding_mask): + ''' Decode and update beam status, and then return active beam idx ''' + + def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq): + dec_partial_seq = [ + b.get_current_state() for b in inst_dec_beams if not b.done + ] + dec_partial_seq = torch.stack(dec_partial_seq) + dec_partial_seq = dec_partial_seq.reshape([-1, len_dec_seq]) + return dec_partial_seq + + def predict_word(dec_seq, enc_output, n_active_inst, n_bm, + memory_key_padding_mask): + dec_seq = self.embedding(dec_seq).permute(1, 0, 2) + dec_seq = self.positional_encoding(dec_seq) + tgt_mask = self.generate_square_subsequent_mask( + dec_seq.shape[0]) + dec_output = self.decoder( + dec_seq, + enc_output, + tgt_mask=tgt_mask, + tgt_key_padding_mask=None, + memory_key_padding_mask=memory_key_padding_mask, ) + dec_output = dec_output.permute(1, 0, 2) + dec_output = dec_output[:, + -1, :] # Pick the last step: (bh * bm) * d_h + word_prob = F.softmax(self.tgt_word_prj(dec_output), dim=1) + word_prob = torch.reshape(word_prob, (n_active_inst, n_bm, -1)) + return word_prob + + def collect_active_inst_idx_list(inst_beams, word_prob, + inst_idx_to_position_map): + active_inst_idx_list = [] + for inst_idx, inst_position in inst_idx_to_position_map.items(): + is_inst_complete = inst_beams[inst_idx].advance(word_prob[ + inst_position]) + if not is_inst_complete: + active_inst_idx_list += [inst_idx] + + return active_inst_idx_list + + n_active_inst = len(inst_idx_to_position_map) + dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq) + word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm, + None) + # Update the beam with predicted word prob information and collect incomplete instances + active_inst_idx_list = collect_active_inst_idx_list( + inst_dec_beams, word_prob, inst_idx_to_position_map) + return active_inst_idx_list + + def collect_hypothesis_and_scores(inst_dec_beams, n_best): + all_hyp, all_scores = [], [] + for inst_idx in range(len(inst_dec_beams)): + scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores() + all_scores += [scores[:n_best]] + hyps = [ + inst_dec_beams[inst_idx].get_hypothesis(i) + for i in tail_idxs[:n_best] + ] + all_hyp += [hyps] + return all_hyp, all_scores + + with torch.no_grad(): + #-- Encode + if self.encoder is not None: + src = self.positional_encoding(images.permute(1, 0, 2)) + src_enc = self.encoder(src) + else: + src_enc = images.squeeze(2).transpose([0, 2, 1]) + + n_bm = self.beam_size + src_shape = src_enc.shape + inst_dec_beams = [Beam(n_bm) for _ in range(1)] + active_inst_idx_list = list(range(1)) + # Repeat data for beam search + # src_enc = paddle.tile(src_enc, [1, n_bm, 1]) + src_enc = src_enc.repeat(1, n_bm, 1) + inst_idx_to_position_map = get_inst_idx_to_tensor_position_map( + active_inst_idx_list) + # Decode + for len_dec_seq in range(1, 25): + src_enc_copy = src_enc.clone() + active_inst_idx_list = beam_decode_step( + inst_dec_beams, len_dec_seq, src_enc_copy, + 
                    inst_idx_to_position_map, n_bm, None)
+                if not active_inst_idx_list:
+                    break  # all instances have finished their path to <EOS>
+                src_enc, inst_idx_to_position_map = collate_active_info(
+                    src_enc_copy, inst_idx_to_position_map,
+                    active_inst_idx_list)
+            batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams,
+                                                                    1)
+            result_hyp = []
+            hyp_scores = []
+            for bs_hyp, score in zip(batch_hyp, batch_scores):
+                l = len(bs_hyp[0])
+                bs_hyp_pad = bs_hyp[0] + [3] * (25 - l)
+                result_hyp.append(bs_hyp_pad)
+                score = float(score) / l
+                hyp_score = [score for _ in range(25)]
+                hyp_scores.append(hyp_score)
+        return [
+            torch.tensor(
+                np.array(result_hyp), dtype=torch.int64),
+            torch.tensor(hyp_scores)
+        ]
+
+    def generate_square_subsequent_mask(self, sz, device=None):
+        """Generate a square mask for the sequence. The masked positions are filled with float('-inf').
+        Unmasked positions are filled with float(0.0).
+        """
+        mask = torch.zeros([sz, sz], dtype=torch.float32, device=device)
+        mask_inf = torch.triu(
+            torch.full(
+                size=[sz, sz], fill_value=float('-Inf'),
+                dtype=torch.float32, device=device),
+            diagonal=1)
+        mask = mask + mask_inf
+        return mask
+
+    def generate_padding_mask(self, x):
+        # padding_mask = paddle.equal(x, paddle.to_tensor(0, dtype=x.dtype))
+        padding_mask = (x == torch.tensor(0, dtype=x.dtype))
+        return padding_mask
+
+    def _reset_parameters(self):
+        """Initiate parameters in the transformer model."""
+
+        for p in self.parameters():
+            if p.dim() > 1:
+                xavier_uniform_(p)
+
+
+class TransformerEncoder(nn.Module):
+    """TransformerEncoder is a stack of N encoder layers
+    Args:
+        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
+        num_layers: the number of sub-encoder-layers in the encoder (required).
+        norm: the layer normalization component (optional).
+    """
+
+    def __init__(self, encoder_layer, num_layers):
+        super(TransformerEncoder, self).__init__()
+        self.layers = _get_clones(encoder_layer, num_layers)
+        self.num_layers = num_layers
+
+    def forward(self, src):
+        """Pass the input through the encoder layers in turn.
+        Args:
+            src: the sequence to the encoder (required).
+            mask: the mask for the src sequence (optional).
+            src_key_padding_mask: the mask for the src keys per batch (optional).
+        """
+        output = src
+
+        for i in range(self.num_layers):
+            output = self.layers[i](output,
+                                    src_mask=None,
+                                    src_key_padding_mask=None)
+
+        return output
+
+
+class TransformerDecoder(nn.Module):
+    """TransformerDecoder is a stack of N decoder layers
+
+    Args:
+        decoder_layer: an instance of the TransformerDecoderLayer() class (required).
+        num_layers: the number of sub-decoder-layers in the decoder (required).
+        norm: the layer normalization component (optional).
+
+    """
+
+    def __init__(self, decoder_layer, num_layers):
+        super(TransformerDecoder, self).__init__()
+        self.layers = _get_clones(decoder_layer, num_layers)
+        self.num_layers = num_layers
+
+    def forward(self,
+                tgt,
+                memory,
+                tgt_mask=None,
+                memory_mask=None,
+                tgt_key_padding_mask=None,
+                memory_key_padding_mask=None):
+        """Pass the inputs (and mask) through the decoder layer in turn.
+
+        Args:
+            tgt: the sequence to the decoder (required).
+            memory: the sequence from the last layer of the encoder (required).
+            tgt_mask: the mask for the tgt sequence (optional).
+            memory_mask: the mask for the memory sequence (optional).
+            tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
+            memory_key_padding_mask: the mask for the memory keys per batch (optional).
+ """ + output = tgt + for i in range(self.num_layers): + output = self.layers[i]( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask) + + return output + + +class TransformerEncoderLayer(nn.Module): + """TransformerEncoderLayer is made up of self-attn and feedforward network. + This standard encoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify or implement + in a different way during application. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + + """ + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + attention_dropout_rate=0.0, + residual_dropout_rate=0.1): + super(TransformerEncoderLayer, self).__init__() + self.self_attn = MultiheadAttention( + d_model, nhead, dropout=attention_dropout_rate) + + self.conv1 = nn.Conv2d( + in_channels=d_model, + out_channels=dim_feedforward, + kernel_size=(1, 1)) + self.conv2 = nn.Conv2d( + in_channels=dim_feedforward, + out_channels=d_model, + kernel_size=(1, 1)) + + self.norm1 = LayerNorm(d_model) + self.norm2 = LayerNorm(d_model) + self.dropout1 = Dropout(residual_dropout_rate) + self.dropout2 = Dropout(residual_dropout_rate) + + def forward(self, src, src_mask=None, src_key_padding_mask=None): + """Pass the input through the endocder layer. + Args: + src: the sequnce to the encoder layer (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + """ + src2 = self.self_attn( + src, + src, + src, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask) + src = src + self.dropout1(src2) + src = self.norm1(src) + + src = src.permute(1, 2, 0) + src = torch.unsqueeze(src, 2) + src2 = self.conv2(F.relu(self.conv1(src))) + src2 = torch.squeeze(src2, 2) + src2 = src2.permute(2, 0, 1) + src = torch.squeeze(src, 2) + src = src.permute(2, 0, 1) + + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + +class TransformerDecoderLayer(nn.Module): + """TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network. + This standard decoder layer is based on the paper "Attention Is All You Need". + Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, + Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in + Neural Information Processing Systems, pages 6000-6010. Users may modify or implement + in a different way during application. + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). 
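+        Note: the position-wise feed-forward sublayer is implemented here with
+        two 1x1 convolutions (conv1/conv2) instead of linear layers.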
+ + """ + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + attention_dropout_rate=0.0, + residual_dropout_rate=0.1): + super(TransformerDecoderLayer, self).__init__() + self.self_attn = MultiheadAttention( + d_model, nhead, dropout=attention_dropout_rate) + self.multihead_attn = MultiheadAttention( + d_model, nhead, dropout=attention_dropout_rate) + + self.conv1 = nn.Conv2d( + in_channels=d_model, + out_channels=dim_feedforward, + kernel_size=(1, 1)) + self.conv2 = nn.Conv2d( + in_channels=dim_feedforward, + out_channels=d_model, + kernel_size=(1, 1)) + + self.norm1 = LayerNorm(d_model) + self.norm2 = LayerNorm(d_model) + self.norm3 = LayerNorm(d_model) + self.dropout1 = Dropout(residual_dropout_rate) + self.dropout2 = Dropout(residual_dropout_rate) + self.dropout3 = Dropout(residual_dropout_rate) + + def forward(self, + tgt, + memory, + tgt_mask=None, + memory_mask=None, + tgt_key_padding_mask=None, + memory_key_padding_mask=None): + """Pass the inputs (and mask) through the decoder layer. + + Args: + tgt: the sequence to the decoder layer (required). + memory: the sequnce from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + + """ + tgt2 = self.self_attn( + tgt, + tgt, + tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask) + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn( + tgt, + memory, + memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + + # default + tgt = tgt.permute(1, 2, 0) + tgt = torch.unsqueeze(tgt, 2) + tgt2 = self.conv2(F.relu(self.conv1(tgt))) + tgt2 = torch.squeeze(tgt2, 2) + tgt2 = tgt2.permute(2, 0, 1) + tgt = torch.squeeze(tgt, 2) + tgt = tgt.permute(2, 0, 1) + + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + +def _get_clones(module, N): + return LayerList([copy.deepcopy(module) for i in range(N)]) + + +class PositionalEncoding(nn.Module): + """Inject some information about the relative or absolute position of the tokens + in the sequence. The positional encodings have the same dimension as + the embeddings, so that the two can be summed. Here, we use sine and cosine + functions of different frequencies. + .. math:: + \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model)) + \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model)) + \text{where pos is the word position and i is the embed idx) + Args: + d_model: the embed dim (required). + dropout: the dropout value (default=0.1). + max_len: the max. length of the incoming sequence (default=5000). 
+ Examples: + >>> pos_encoder = PositionalEncoding(d_model) + """ + + def __init__(self, dropout, dim, max_len=5000): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p=dropout) + + pe = torch.zeros([max_len, dim]) + position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, dim, 2).type(torch.float32) * + (-math.log(10000.0) / dim)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = torch.unsqueeze(pe, 0) + pe = pe.permute(1, 0, 2) + self.register_buffer('pe', pe) + + def forward(self, x): + """Inputs of forward function + Args: + x: the sequence fed to the positional encoder model (required). + Shape: + x: [sequence length, batch size, embed dim] + output: [sequence length, batch size, embed dim] + Examples: + >>> output = pos_encoder(x) + """ + x = x + self.pe[:x.shape[0], :] + return self.dropout(x) + + +class PositionalEncoding_2d(nn.Module): + """Inject some information about the relative or absolute position of the tokens + in the sequence. The positional encodings have the same dimension as + the embeddings, so that the two can be summed. Here, we use sine and cosine + functions of different frequencies. + .. math:: + \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model)) + \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model)) + \text{where pos is the word position and i is the embed idx) + Args: + d_model: the embed dim (required). + dropout: the dropout value (default=0.1). + max_len: the max. length of the incoming sequence (default=5000). + Examples: + >>> pos_encoder = PositionalEncoding(d_model) + """ + + def __init__(self, dropout, dim, max_len=5000): + super(PositionalEncoding_2d, self).__init__() + self.dropout = nn.Dropout(p=dropout) + + pe = torch.zeros([max_len, dim]) + position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, dim, 2).type(torch.float32) * + (-math.log(10000.0) / dim)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = torch.unsqueeze(pe, 0).permute(1, 0, 2) + self.register_buffer('pe', pe) + + self.avg_pool_1 = nn.AdaptiveAvgPool2d((1, 1)) + self.linear1 = nn.Linear(dim, dim) + self.linear1.weight.data.fill_(1.) + self.avg_pool_2 = nn.AdaptiveAvgPool2d((1, 1)) + self.linear2 = nn.Linear(dim, dim) + self.linear2.weight.data.fill_(1.) + + def forward(self, x): + """Inputs of forward function + Args: + x: the sequence fed to the positional encoder model (required). 
+ Shape: + x: [sequence length, batch size, embed dim] + output: [sequence length, batch size, embed dim] + Examples: + >>> output = pos_encoder(x) + """ + w_pe = self.pe[:x.shape[-1], :] + w1 = self.linear1(self.avg_pool_1(x).squeeze()).unsqueeze(0) + w_pe = w_pe * w1 + w_pe = w_pe.permute(1, 2, 0) + w_pe = torch.unsqueeze(w_pe, 2) + + h_pe = self.pe[:x.shape[-2], :] + w2 = self.linear2(self.avg_pool_2(x).squeeze()).unsqueeze(0) + h_pe = h_pe * w2 + h_pe = h_pe.permute(1, 2, 0) + h_pe = torch.unsqueeze(h_pe, 3) + + x = x + w_pe + h_pe + x = torch.reshape( + x, [x.shape[0], x.shape[1], x.shape[2] * x.shape[3]] + ).permute(2,0,1) + + return self.dropout(x) + + +class Embeddings(nn.Module): + def __init__(self, d_model, vocab, padding_idx, scale_embedding): + super(Embeddings, self).__init__() + self.embedding = nn.Embedding(vocab, d_model, padding_idx=padding_idx) + w0 = np.random.normal(0.0, d_model**-0.5, + (vocab, d_model)).astype(np.float32) + self.embedding.weight.data = torch.from_numpy(w0) + self.d_model = d_model + self.scale_embedding = scale_embedding + + def forward(self, x): + if self.scale_embedding: + x = self.embedding(x) + return x * math.sqrt(self.d_model) + return self.embedding(x) + + +class Beam(): + ''' Beam search ''' + + def __init__(self, size, device=False): + + self.size = size + self._done = False + # The score for each translation on the beam. + self.scores = torch.zeros((size, ), dtype=torch.float32) + self.all_scores = [] + # The backpointers at each time-step. + self.prev_ks = [] + # The outputs at each time-step. + self.next_ys = [torch.full((size, ), 0, dtype=torch.int64)] + self.next_ys[0][0] = 2 + + def get_current_state(self): + "Get the outputs for the current timestep." + return self.get_tentative_hypothesis() + + def get_current_origin(self): + "Get the backpointers for the current timestep." + return self.prev_ks[-1] + + @property + def done(self): + return self._done + + def advance(self, word_prob): + "Update beam status and check if finished or not." + num_words = word_prob.shape[1] + + # Sum the previous scores. + if len(self.prev_ks) > 0: + beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob) + else: + beam_lk = word_prob[0] + + flat_beam_lk = beam_lk.reshape([-1]) + best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, + True) # 1st sort + self.all_scores.append(self.scores) + self.scores = best_scores + # bestScoresId is flattened as a (beam x word) array, + # so we need to calculate which word and beam each score came from + prev_k = best_scores_id // num_words + self.prev_ks.append(prev_k) + self.next_ys.append(best_scores_id - prev_k * num_words) + # End condition is when top-of-beam is EOS. + if self.next_ys[-1][0] == 3: + self._done = True + self.all_scores.append(self.scores) + + return self._done + + def sort_scores(self): + "Sort the scores." + return self.scores, torch.tensor( + [i for i in range(int(self.scores.shape[0]))], dtype=torch.int32) + + def get_the_best_score_and_idx(self): + "Get the score of the best in the beam." + scores, ids = self.sort_scores() + return scores[1], ids[1] + + def get_tentative_hypothesis(self): + "Get the decoded sequence for the current timestep." 
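+        # At step 0 only the start tokens exist; afterwards the partial
+        # hypotheses are re-sorted by score and prefixed with the SOS id (2).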
+        if len(self.next_ys) == 1:
+            dec_seq = self.next_ys[0].unsqueeze(1)
+        else:
+            _, keys = self.sort_scores()
+            hyps = [self.get_hypothesis(k) for k in keys]
+            hyps = [[2] + h for h in hyps]
+            dec_seq = torch.tensor(hyps, dtype=torch.int64)
+        return dec_seq
+
+    def get_hypothesis(self, k):
+        """ Walk back to construct the full hypothesis. """
+        hyp = []
+        for j in range(len(self.prev_ks) - 1, -1, -1):
+            hyp.append(self.next_ys[j + 1][k])
+            k = self.prev_ks[j][k]
+        return list(map(lambda x: x.item(), hyp[::-1]))
diff --git a/modules/pytorchocr/modeling/heads/rec_sar_head.py b/modules/pytorchocr/modeling/heads/rec_sar_head.py
new file mode 100644
index 0000000..128081c
--- /dev/null
+++ b/modules/pytorchocr/modeling/heads/rec_sar_head.py
@@ -0,0 +1,403 @@
+"""
+This code is adapted from:
+https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/encoders/sar_encoder.py
+https://github.com/open-mmlab/mmocr/blob/main/mmocr/models/textrecog/decoders/sar_decoder.py
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+# import paddle
+# from paddle import ParamAttr
+# import paddle.nn as nn
+# import paddle.nn.functional as F
+
+
+class SAREncoder(nn.Module):
+    """
+    Args:
+        enc_bi_rnn (bool): If True, use bidirectional RNN in encoder.
+        enc_drop_rnn (float): Dropout probability of RNN layer in encoder.
+        enc_gru (bool): If True, use GRU, else LSTM in encoder.
+        d_model (int): Dim of channels from backbone.
+        d_enc (int): Dim of encoder RNN layer.
+        mask (bool): If True, mask padding in RNN sequence.
+    """
+
+    def __init__(self,
+                 enc_bi_rnn=False,
+                 enc_drop_rnn=0.0,
+                 enc_gru=False,
+                 d_model=512,
+                 d_enc=512,
+                 mask=True,
+                 **kwargs):
+        super().__init__()
+        assert isinstance(enc_bi_rnn, bool)
+        assert isinstance(enc_drop_rnn, (int, float))
+        assert 0 <= enc_drop_rnn < 1.0
+        assert isinstance(enc_gru, bool)
+        assert isinstance(d_model, int)
+        assert isinstance(d_enc, int)
+        assert isinstance(mask, bool)
+
+        self.enc_bi_rnn = enc_bi_rnn
+        self.enc_drop_rnn = enc_drop_rnn
+        self.mask = mask
+
+        # LSTM Encoder
+        # if enc_bi_rnn:
+        #     direction = 'bidirectional'
+        # else:
+        #     direction = 'forward'
+        kwargs = dict(
+            input_size=d_model,
+            hidden_size=d_enc,
+            num_layers=2,
+            batch_first=True,
+            dropout=enc_drop_rnn,
+            bidirectional=enc_bi_rnn)
+        if enc_gru:
+            self.rnn_encoder = nn.GRU(**kwargs)
+        else:
+            self.rnn_encoder = nn.LSTM(**kwargs)
+
+        # global feature transformation
+        encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1)
+        self.linear = nn.Linear(encoder_rnn_out_size, encoder_rnn_out_size)
+
+    def forward(self, feat, img_metas=None):
+        if img_metas is not None:
+            assert len(img_metas[0]) == feat.size(0)
+
+        valid_ratios = None
+        if img_metas is not None and self.mask:
+            valid_ratios = img_metas[-1]
+
+        h_feat = feat.shape[2]  # bsz c h w
+        feat_v = F.max_pool2d(
+            feat, kernel_size=(h_feat, 1), stride=1, padding=0)
+        feat_v = feat_v.squeeze(2)  # bsz * C * W
+        feat_v = feat_v.permute(0, 2, 1).contiguous()  # bsz * W * C
+        holistic_feat = self.rnn_encoder(feat_v)[0]  # bsz * T * C
+
+        if valid_ratios is not None:
+            valid_hf = []
+            T = holistic_feat.size(1)
+            for i in range(valid_ratios.size(0)):
+                # T is a Python int, so clamp with built-in min/ceil rather
+                # than torch.min/torch.ceil, which expect tensors
+                valid_step = min(T, int(math.ceil(T * float(valid_ratios[i])))) - 1
+                # valid_step = paddle.minimum(
+                #     T, paddle.ceil(valid_ratios[i] * T).astype('int32')) - 1
+                valid_hf.append(holistic_feat[i, valid_step, :])
+            valid_hf = torch.stack(valid_hf,
dim=0) + else: + valid_hf = holistic_feat[:, -1, :] # bsz * C + holistic_feat = self.linear(valid_hf) # bsz * C + + return holistic_feat + + +class BaseDecoder(nn.Module): + def __init__(self, **kwargs): + super().__init__() + + def forward_train(self, feat, out_enc, targets, img_metas): + raise NotImplementedError + + def forward_test(self, feat, out_enc, img_metas): + raise NotImplementedError + + def forward(self, + feat, + out_enc, + label=None, + img_metas=None, + train_mode=True): + self.train_mode = train_mode + + if train_mode: + return self.forward_train(feat, out_enc, label, img_metas) + return self.forward_test(feat, out_enc, img_metas) + + +class ParallelSARDecoder(BaseDecoder): + """ + Args: + out_channels (int): Output class number. + enc_bi_rnn (bool): If True, use bidirectional RNN in encoder. + dec_bi_rnn (bool): If True, use bidirectional RNN in decoder. + dec_drop_rnn (float): Dropout of RNN layer in decoder. + dec_gru (bool): If True, use GRU, else LSTM in decoder. + d_model (int): Dim of channels from backbone. + d_enc (int): Dim of encoder RNN layer. + d_k (int): Dim of channels of attention module. + pred_dropout (float): Dropout probability of prediction layer. + max_seq_len (int): Maximum sequence length for decoding. + mask (bool): If True, mask padding in feature map. + start_idx (int): Index of start token. + padding_idx (int): Index of padding token. + pred_concat (bool): If True, concat glimpse feature from + attention with holistic feature and hidden state. + """ + + def __init__( + self, + out_channels, # 90 + unknown + start + padding + enc_bi_rnn=False, + dec_bi_rnn=False, + dec_drop_rnn=0.0, + dec_gru=False, + d_model=512, + d_enc=512, + d_k=64, + pred_dropout=0.0, + max_text_length=30, + mask=True, + pred_concat=True, + **kwargs): + super().__init__() + + self.num_classes = out_channels + self.enc_bi_rnn = enc_bi_rnn + self.d_k = d_k + self.start_idx = out_channels - 2 + self.padding_idx = out_channels - 1 + self.max_seq_len = max_text_length + self.mask = mask + self.pred_concat = pred_concat + + encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1) + decoder_rnn_out_size = encoder_rnn_out_size * (int(dec_bi_rnn) + 1) + + # 2D attention layer + self.conv1x1_1 = nn.Linear(decoder_rnn_out_size, d_k) + self.conv3x3_1 = nn.Conv2d( + d_model, d_k, kernel_size=3, stride=1, padding=1) + self.conv1x1_2 = nn.Linear(d_k, 1) + + # Decoder RNN layer + # if dec_bi_rnn: + # direction = 'bidirectional' + # else: + # direction = 'forward' + + kwargs = dict( + input_size=encoder_rnn_out_size, + hidden_size=encoder_rnn_out_size, + num_layers=2, + batch_first=True, + dropout=dec_drop_rnn, + bidirectional=dec_bi_rnn) + if dec_gru: + self.rnn_decoder = nn.GRU(**kwargs) + else: + self.rnn_decoder = nn.LSTM(**kwargs) + + # Decoder input embedding + self.embedding = nn.Embedding( + self.num_classes, + encoder_rnn_out_size, + padding_idx=self.padding_idx) + + # Prediction layer + self.pred_dropout = nn.Dropout(pred_dropout) + pred_num_classes = self.num_classes - 1 + if pred_concat: + fc_in_channel = decoder_rnn_out_size + d_model + encoder_rnn_out_size + else: + fc_in_channel = d_model + self.prediction = nn.Linear(fc_in_channel, pred_num_classes) + + def _2d_attention(self, + decoder_input, + feat, + holistic_feat, + valid_ratios=None): + + y = self.rnn_decoder(decoder_input)[0] + # y: bsz * (seq_len + 1) * hidden_size + + attn_query = self.conv1x1_1(y) # bsz * (seq_len + 1) * attn_size + bsz, seq_len, attn_size = attn_query.shape + # attn_query = paddle.unsqueeze(attn_query, 
axis=[3, 4])
+        attn_query = attn_query.view(bsz, seq_len, attn_size, 1, 1)
+        # attn_query = attn_query.unsqueeze(3).unsqueeze(4)
+        # (bsz, seq_len + 1, attn_size, 1, 1)
+
+        attn_key = self.conv3x3_1(feat)
+        # bsz * attn_size * h * w
+        attn_key = attn_key.unsqueeze(1)
+        # bsz * 1 * attn_size * h * w
+
+        attn_weight = torch.tanh(torch.add(attn_key, attn_query))
+
+        # bsz * (seq_len + 1) * attn_size * h * w
+        attn_weight = attn_weight.permute(0, 1, 3, 4, 2).contiguous()
+        # bsz * (seq_len + 1) * h * w * attn_size
+        attn_weight = self.conv1x1_2(attn_weight)
+        # bsz * (seq_len + 1) * h * w * 1
+        bsz, T, h, w, c = attn_weight.size()
+        assert c == 1
+
+        if valid_ratios is not None:
+            # cal mask of attention weight
+            for i in range(valid_ratios.size(0)):
+                # w is a Python int, so clamp with built-in min/ceil rather
+                # than torch.min/torch.ceil, which expect tensors
+                valid_width = min(w, int(math.ceil(w * float(valid_ratios[i]))))
+                # valid_width = paddle.minimum(
+                #     w, paddle.ceil(valid_ratios[i] * w).astype("int32"))
+                if valid_width < w:
+                    attn_weight[i, :, :, valid_width:, :] = float('-inf')
+
+        # attn_weight = paddle.reshape(attn_weight, [bsz, T, -1])
+        attn_weight = attn_weight.view(bsz, T, -1)
+        attn_weight = F.softmax(attn_weight, dim=-1)
+
+        attn_weight = attn_weight.view(bsz, T, h, w,
+                                       c).permute(0, 1, 4, 2, 3).contiguous()
+        # attn_weight: bsz * T * c * h * w
+        # feat: bsz * c * h * w
+        attn_feat = torch.sum(
+            torch.mul(feat.unsqueeze(1), attn_weight), (3, 4), keepdim=False)
+        # bsz * (seq_len + 1) * C
+
+        # Linear transformation
+        if self.pred_concat:
+            hf_c = holistic_feat.shape[-1]
+            holistic_feat = holistic_feat.expand(bsz, seq_len, hf_c)
+            y = self.prediction(torch.cat((y, attn_feat, holistic_feat), 2))
+        else:
+            y = self.prediction(attn_feat)
+        # bsz * (seq_len + 1) * num_classes
+        if self.train_mode:
+            y = self.pred_dropout(y)
+
+        return y
+
+    def forward_train(self, feat, out_enc, label, img_metas):
+        '''
+        img_metas: [label, valid_ratio]
+        '''
+        if img_metas is not None:
+            assert img_metas[0].size(0) == feat.size(0)
+
+        valid_ratios = None
+        if img_metas is not None and self.mask:
+            valid_ratios = img_metas[-1]
+
+        lab_embedding = self.embedding(label)
+        # bsz * seq_len * emb_dim
+        out_enc = out_enc.unsqueeze(1)
+        # bsz * 1 * emb_dim
+        in_dec = torch.cat((out_enc, lab_embedding), dim=1)
+        # bsz * (seq_len + 1) * C
+        out_dec = self._2d_attention(
+            in_dec, feat, out_enc, valid_ratios=valid_ratios)
+
+        return out_dec[:, 1:, :]  # bsz * seq_len * num_classes
+
+    def forward_test(self, feat, out_enc, img_metas):
+        if img_metas is not None:
+            assert len(img_metas[0]) == feat.shape[0]
+
+        valid_ratios = None
+        if img_metas is not None and self.mask:
+            valid_ratios = img_metas[-1]
+
+        seq_len = self.max_seq_len
+        bsz = feat.size(0)
+        start_token = torch.full(
+            (bsz, ), fill_value=self.start_idx, device=feat.device,
+            dtype=torch.long)
+        # bsz
+        start_token = self.embedding(start_token)
+        # bsz * emb_dim
+        emb_dim = start_token.shape[1]
+        # start_token = start_token.unsqueeze(1).expand(-1, seq_len, -1)
+        start_token = start_token.unsqueeze(1).expand(bsz, seq_len, emb_dim)
+        # bsz * seq_len * emb_dim
+        out_enc = out_enc.unsqueeze(1)
+        # bsz * 1 * emb_dim
+        decoder_input = torch.cat((out_enc, start_token), dim=1)
+        # bsz * (seq_len + 1) * emb_dim
+
+        outputs = []
+        for i in range(1, seq_len + 1):
+            decoder_output = self._2d_attention(
+                decoder_input, feat, out_enc, valid_ratios=valid_ratios)
+            char_output = decoder_output[:, i, :]  # bsz * num_classes
+            char_output = F.softmax(char_output, -1)
+            outputs.append(char_output)
+            _, max_idx = torch.max(char_output, dim=1, keepdim=False)
+            char_embedding = 
self.embedding(max_idx) # bsz * emb_dim + if i < seq_len: + decoder_input[:, i + 1, :] = char_embedding + + outputs = torch.stack(outputs, 1) # bsz * seq_len * num_classes + + return outputs + + +class SARHead(nn.Module): + def __init__(self, + in_channels, + out_channels, + enc_dim=512, + max_text_length=30, + enc_bi_rnn=False, + enc_drop_rnn=0.1, + enc_gru=False, + dec_bi_rnn=False, + dec_drop_rnn=0.0, + dec_gru=False, + d_k=512, + pred_dropout=0.1, + pred_concat=True, + **kwargs): + super(SARHead, self).__init__() + + # encoder module + self.encoder = SAREncoder( + enc_bi_rnn=enc_bi_rnn, + enc_drop_rnn=enc_drop_rnn, + enc_gru=enc_gru, + d_model=in_channels, + d_enc=enc_dim) + + # decoder module + self.decoder = ParallelSARDecoder( + out_channels=out_channels, + enc_bi_rnn=enc_bi_rnn, + dec_bi_rnn=dec_bi_rnn, + dec_drop_rnn=dec_drop_rnn, + dec_gru=dec_gru, + d_model=in_channels, + d_enc=enc_dim, + d_k=d_k, + pred_dropout=pred_dropout, + max_text_length=max_text_length, + pred_concat=pred_concat) + + def forward(self, feat, targets=None): + ''' + img_metas: [label, valid_ratio] + ''' + holistic_feat = self.encoder(feat, targets) # bsz c + + if self.training: + label = targets[0] # label + final_out = self.decoder( + feat, holistic_feat, label, img_metas=targets) + else: + final_out = self.decoder( + feat, + holistic_feat, + label=None, + img_metas=targets, + train_mode=False) + # (bsz, seq_len, num_classes) + + return final_out diff --git a/modules/pytorchocr/modeling/heads/rec_srn_head.py b/modules/pytorchocr/modeling/heads/rec_srn_head.py new file mode 100644 index 0000000..47ebb7c --- /dev/null +++ b/modules/pytorchocr/modeling/heads/rec_srn_head.py @@ -0,0 +1,271 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +import numpy as np +from .self_attention import WrapEncoderForFeature +from .self_attention import WrapEncoder + +from collections import OrderedDict +gradient_clip = 10 + +# https://forums.fast.ai/t/lambda-layer/28507/5 +class Lambda(nn.Module): + "An easy way to create a pytorch layer for a simple `func`." 
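+    # e.g. Lambda(lambda x: torch.flatten(x, start_dim=0, end_dim=1)) wraps a
+    # plain function as an nn.Module (used by PVAM below).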
+ def __init__(self, func): + "create a layer that simply calls `func` with `x`" + super().__init__() + self.func=func + + def forward(self, x): + return self.func(x) + +class PVAM(nn.Module): + def __init__(self, in_channels, char_num, max_text_length, num_heads, + num_encoder_tus, hidden_dims): + super(PVAM, self).__init__() + self.char_num = char_num + self.max_length = max_text_length + self.num_heads = num_heads + self.num_encoder_TUs = num_encoder_tus + self.hidden_dims = hidden_dims + # Transformer encoder + t = 256 + c = 512 + self.wrap_encoder_for_feature = WrapEncoderForFeature( + src_vocab_size=1, + max_length=t, + n_layer=self.num_encoder_TUs, + n_head=self.num_heads, + d_key=int(self.hidden_dims / self.num_heads), + d_value=int(self.hidden_dims / self.num_heads), + d_model=self.hidden_dims, + d_inner_hid=self.hidden_dims, + prepostprocess_dropout=0.0,#0.1, + attention_dropout=0.0,#0.1, + relu_dropout=0.0,#0.1, + preprocess_cmd="n", + postprocess_cmd="da", + weight_sharing=True) + + # PVAM + self.flatten0 = Lambda(lambda x: torch.flatten(x, start_dim=0, end_dim=1)) + self.fc0 = torch.nn.Linear( + in_features=in_channels, + out_features=in_channels, ) + self.emb = torch.nn.Embedding( + num_embeddings=self.max_length, embedding_dim=in_channels) + self.flatten1 = Lambda(lambda x: torch.flatten(x, start_dim=0, end_dim=2)) + self.fc1 = torch.nn.Linear( + in_features=in_channels, out_features=1, bias=False) + + def forward(self, inputs, encoder_word_pos, gsrm_word_pos): + b, c, h, w = inputs.shape + conv_features = torch.reshape(inputs, shape=[-1, c, h * w]) + conv_features = conv_features.permute(0, 2, 1) + # transformer encoder + b, t, c = conv_features.shape + + enc_inputs = [conv_features, encoder_word_pos, None] + word_features = self.wrap_encoder_for_feature(enc_inputs) + + # pvam + b, t, c = word_features.shape + word_features = self.fc0(word_features) + word_features_ = torch.reshape(word_features, [-1, 1, t, c]) + word_features_ = word_features_.repeat([1, self.max_length, 1, 1]) + word_pos_feature = self.emb(gsrm_word_pos) + word_pos_feature_ = torch.reshape(word_pos_feature, + [-1, self.max_length, 1, c]) + word_pos_feature_ = word_pos_feature_.repeat([1, 1, t, 1]) + y = word_pos_feature_ + word_features_ + y = torch.tanh(y) + attention_weight = self.fc1(y) + attention_weight = torch.reshape( + attention_weight, shape=[-1, self.max_length, t]) + attention_weight = F.softmax(attention_weight, dim=-1) + pvam_features = torch.matmul(attention_weight, + word_features) #[b, max_length, c] + return pvam_features + + +class GSRM(nn.Module): + def __init__(self, in_channels, char_num, max_text_length, num_heads, + num_encoder_tus, num_decoder_tus, hidden_dims): + super(GSRM, self).__init__() + self.char_num = char_num + self.max_length = max_text_length + self.num_heads = num_heads + self.num_encoder_TUs = num_encoder_tus + self.num_decoder_TUs = num_decoder_tus + self.hidden_dims = hidden_dims + + self.fc0 = torch.nn.Linear( + in_features=in_channels, out_features=self.char_num) + self.wrap_encoder0 = WrapEncoder( + src_vocab_size=self.char_num + 1, + max_length=self.max_length, + n_layer=self.num_decoder_TUs, + n_head=self.num_heads, + d_key=int(self.hidden_dims / self.num_heads), + d_value=int(self.hidden_dims / self.num_heads), + d_model=self.hidden_dims, + d_inner_hid=self.hidden_dims, + prepostprocess_dropout=0.0, + attention_dropout=0.0, + relu_dropout=0.0, + preprocess_cmd="n", + postprocess_cmd="da", + weight_sharing=True) + + self.wrap_encoder1 = WrapEncoder( + 
src_vocab_size=self.char_num + 1, + max_length=self.max_length, + n_layer=self.num_decoder_TUs, + n_head=self.num_heads, + d_key=int(self.hidden_dims / self.num_heads), + d_value=int(self.hidden_dims / self.num_heads), + d_model=self.hidden_dims, + d_inner_hid=self.hidden_dims, + prepostprocess_dropout=0.0, + attention_dropout=0.0, + relu_dropout=0.0, + preprocess_cmd="n", + postprocess_cmd="da", + weight_sharing=True) + + self.mul = lambda x: torch.matmul(x, + self.wrap_encoder0.prepare_decoder.emb0.weight.t(), + ) + + def forward(self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1, + gsrm_slf_attn_bias2): + # ===== GSRM Visual-to-semantic embedding block ===== + b, t, c = inputs.shape + pvam_features = torch.reshape(inputs, [-1, c]) + word_out = self.fc0(pvam_features) + word_ids = torch.argmax(F.softmax(word_out, dim=-1), dim=1) + word_ids = torch.reshape(word_ids, shape=[-1, t, 1]) + + #===== GSRM Semantic reasoning block ===== + """ + This module is achieved through bi-transformers, + ngram_feature1 is the froward one, ngram_fetaure2 is the backward one + """ + pad_idx = self.char_num + word1 = F.pad(word_ids.type(torch.float32), [0, 0, 1, 0, 0, 0], value=1.0 * pad_idx) + word1 = word1.type(torch.int64) + word1 = word1[:, :-1, :] + word2 = word_ids + + enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1] + enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2] + + gsrm_feature1 = self.wrap_encoder0(enc_inputs_1) + gsrm_feature2 = self.wrap_encoder1(enc_inputs_2) + + gsrm_feature2 = F.pad(gsrm_feature2, [0, 0, 0, 1, 0, 0], + value=0., + ) + gsrm_feature2 = gsrm_feature2[:, 1:, ] + gsrm_features = gsrm_feature1 + gsrm_feature2 + + gsrm_out = self.mul(gsrm_features) + + b, t, c = gsrm_out.shape + gsrm_out = torch.reshape(gsrm_out, [-1, c]) + + return gsrm_features, word_out, gsrm_out + + +class VSFD(nn.Module): + def __init__(self, in_channels=512, pvam_ch=512, char_num=38): + super(VSFD, self).__init__() + self.char_num = char_num + self.fc0 = torch.nn.Linear( + in_features=in_channels * 2, out_features=pvam_ch) + self.fc1 = torch.nn.Linear( + in_features=pvam_ch, out_features=self.char_num) + + def forward(self, pvam_feature, gsrm_feature): + b, t, c1 = pvam_feature.shape + b, t, c2 = gsrm_feature.shape + combine_feature_ = torch.cat([pvam_feature, gsrm_feature], dim=2) + img_comb_feature_ = torch.reshape( + combine_feature_, shape=[-1, c1 + c2]) + img_comb_feature_map = self.fc0(img_comb_feature_) + img_comb_feature_map = torch.sigmoid(img_comb_feature_map) + img_comb_feature_map = torch.reshape( + img_comb_feature_map, shape=[-1, t, c1]) + combine_feature = img_comb_feature_map * pvam_feature + ( + 1.0 - img_comb_feature_map) * gsrm_feature + img_comb_feature = torch.reshape(combine_feature, shape=[-1, c1]) + + out = self.fc1(img_comb_feature) + return out + + +class SRNHead(nn.Module): + def __init__(self, in_channels, out_channels, max_text_length, num_heads, + num_encoder_TUs, num_decoder_TUs, hidden_dims, **kwargs): + super(SRNHead, self).__init__() + self.char_num = out_channels + self.max_length = max_text_length + self.num_heads = num_heads + self.num_encoder_TUs = num_encoder_TUs + self.num_decoder_TUs = num_decoder_TUs + self.hidden_dims = hidden_dims + + self.pvam = PVAM( + in_channels=in_channels, + char_num=self.char_num, + max_text_length=self.max_length, + num_heads=self.num_heads, + num_encoder_tus=self.num_encoder_TUs, + hidden_dims=self.hidden_dims) + + self.gsrm = GSRM( + in_channels=in_channels, + char_num=self.char_num, + max_text_length=self.max_length, + 
num_heads=self.num_heads, + num_encoder_tus=self.num_encoder_TUs, + num_decoder_tus=self.num_decoder_TUs, + hidden_dims=self.hidden_dims) + self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num) + + self.gsrm.wrap_encoder1.prepare_decoder.emb0 = self.gsrm.wrap_encoder0.prepare_decoder.emb0 + + def forward(self, inputs, others): + encoder_word_pos = others[0] + gsrm_word_pos = others[1].type(torch.long) + gsrm_slf_attn_bias1 = others[2] + gsrm_slf_attn_bias2 = others[3] + + pvam_feature = self.pvam(inputs, encoder_word_pos, gsrm_word_pos) + + gsrm_feature, word_out, gsrm_out = self.gsrm( + pvam_feature, gsrm_word_pos, gsrm_slf_attn_bias1, + gsrm_slf_attn_bias2) + + final_out = self.vsfd(pvam_feature, gsrm_feature) + if not self.training: + final_out = F.softmax(final_out, dim=1) + + _, decoded_out = torch.topk(final_out, k=1) + + predicts = OrderedDict([ + ('predict', final_out), + ('pvam_feature', pvam_feature), + ('decoded_out', decoded_out), + ('word_out', word_out), + ('gsrm_out', gsrm_out), + ]) + + return predicts diff --git a/modules/pytorchocr/modeling/heads/self_attention.py b/modules/pytorchocr/modeling/heads/self_attention.py new file mode 100644 index 0000000..3d37c60 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/self_attention.py @@ -0,0 +1,419 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +import numpy as np +gradient_clip = 10 + + +class WrapEncoderForFeature(nn.Module): + def __init__(self, + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + bos_idx=0): + super(WrapEncoderForFeature, self).__init__() + + self.prepare_encoder = PrepareEncoder( + src_vocab_size, + d_model, + max_length, + prepostprocess_dropout, + bos_idx=bos_idx, + word_emb_param_name="src_word_emb_table") + self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model, + d_inner_hid, prepostprocess_dropout, + attention_dropout, relu_dropout, preprocess_cmd, + postprocess_cmd) + + def forward(self, enc_inputs): + conv_features, src_pos, src_slf_attn_bias = enc_inputs + enc_input = self.prepare_encoder(conv_features, src_pos) + enc_output = self.encoder(enc_input, src_slf_attn_bias) + return enc_output + + +class WrapEncoder(nn.Module): + """ + embedder + encoder + """ + + def __init__(self, + src_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + bos_idx=0): + super(WrapEncoder, self).__init__() + + self.prepare_decoder = PrepareDecoder( + src_vocab_size, + d_model, + max_length, + prepostprocess_dropout, + bos_idx=bos_idx) + self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model, + d_inner_hid, prepostprocess_dropout, + attention_dropout, relu_dropout, preprocess_cmd, + postprocess_cmd) + + def forward(self, enc_inputs): + src_word, src_pos, src_slf_attn_bias = enc_inputs + enc_input = self.prepare_decoder(src_word, src_pos) + enc_output = self.encoder(enc_input, src_slf_attn_bias) + return enc_output + + +class Encoder(nn.Module): + """ + encoder + """ + + def __init__(self, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + 
prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da"): + + super(Encoder, self).__init__() + + self.encoder_layers = nn.ModuleList() + for i in range(n_layer): + encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, + prepostprocess_dropout, attention_dropout, + relu_dropout, preprocess_cmd, + postprocess_cmd) + self.encoder_layers.add_module("layer_%d" % i, encoderLayer) + self.processer = PrePostProcessLayer(preprocess_cmd, d_model, + prepostprocess_dropout) + + def forward(self, enc_input, attn_bias): + for encoder_layer in self.encoder_layers: + enc_output = encoder_layer(enc_input, attn_bias) + enc_input = enc_output + enc_output = self.processer(enc_output) + + return enc_output + + +class EncoderLayer(nn.Module): + """ + EncoderLayer + """ + + def __init__(self, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da"): + + super(EncoderLayer, self).__init__() + self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, + prepostprocess_dropout) + self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, + attention_dropout) + self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, + prepostprocess_dropout) + + self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, + prepostprocess_dropout) + self.ffn = FFN(d_inner_hid, d_model, relu_dropout) + self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, + prepostprocess_dropout) + + def forward(self, enc_input, attn_bias): + attn_output = self.self_attn( + self.preprocesser1(enc_input), None, None, attn_bias) + attn_output = self.postprocesser1(attn_output, enc_input) + ffn_output = self.ffn(self.preprocesser2(attn_output)) + ffn_output = self.postprocesser2(ffn_output, attn_output) + return ffn_output + + +class MultiHeadAttention(nn.Module): + """ + Multi-Head Attention + """ + + def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.): + super(MultiHeadAttention, self).__init__() + self.n_head = n_head + self.d_key = d_key + self.d_value = d_value + self.d_model = d_model + self.dropout_rate = dropout_rate + self.q_fc = torch.nn.Linear( + in_features=d_model, out_features=d_key * n_head, bias=False) + self.k_fc = torch.nn.Linear( + in_features=d_model, out_features=d_key * n_head, bias=False) + self.v_fc = torch.nn.Linear( + in_features=d_model, out_features=d_value * n_head, bias=False) + self.proj_fc = torch.nn.Linear( + in_features=d_value * n_head, out_features=d_model, bias=False) + + def _prepare_qkv(self, queries, keys, values, cache=None): + if keys is None: # self-attention + keys, values = queries, queries + static_kv = False + else: # cross-attention + static_kv = True + + q = self.q_fc(queries) + q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self.d_key]) + q = q.permute(0, 2, 1, 3) + + if cache is not None and static_kv and "static_k" in cache: + # for encoder-decoder attention in inference and has cached + k = cache["static_k"] + v = cache["static_v"] + else: + k = self.k_fc(keys) + v = self.v_fc(values) + k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key]) + k = k.permute(0, 2, 1, 3) + v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value]) + v = v.permute(0, 2, 1, 3) + + if cache is not None: + if static_kv and not "static_k" in cache: + # for encoder-decoder attention in inference and has not cached + 
cache["static_k"], cache["static_v"] = k, v + elif not static_kv: + # for decoder self-attention in inference + cache_k, cache_v = cache["k"], cache["v"] + k = torch.cat([cache_k, k], dim=2) + v = torch.cat([cache_v, v], dim=2) + cache["k"], cache["v"] = k, v + + return q, k, v + + def forward(self, queries, keys, values, attn_bias, cache=None): + # compute q ,k ,v + keys = queries if keys is None else keys + values = keys if values is None else values + q, k, v = self._prepare_qkv(queries, keys, values, cache) + + # scale dot product attention + product = torch.matmul(q, k.transpose(2, 3)) + product = product * self.d_model**-0.5 + if attn_bias is not None: + product += attn_bias + weights = F.softmax(product, dim=-1) + if self.dropout_rate: + weights = F.dropout( + weights, p=self.dropout_rate) + out = torch.matmul(weights, v) + + # combine heads + out = out.permute(0, 2, 1, 3) + out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape[2] * out.shape[3]]) + + # project to output + out = self.proj_fc(out) + + return out + + +# https://forums.fast.ai/t/lambda-layer/28507/5 +class Lambda(nn.Module): + "An easy way to create a pytorch layer for a simple `func`." + def __init__(self, func): + "create a layer that simply calls `func` with `x`" + super().__init__() + self.func=func + + def forward(self, x): + return self.func(x) + +class LambdaXY(nn.Module): + "An easy way to create a pytorch layer for a simple `func`." + def __init__(self, func): + "create a layer that simply calls `func` with `x`" + super().__init__() + self.func=func + + def forward(self, x, y): + return self.func(x, y) + + +class PrePostProcessLayer(nn.Module): + """ + PrePostProcessLayer + """ + + def __init__(self, process_cmd, d_model, dropout_rate): + super(PrePostProcessLayer, self).__init__() + self.process_cmd = process_cmd + self.functors = nn.ModuleList() + cur_a_len = 0 + cur_n_len = 0 + cur_d_len = 0 + for cmd in self.process_cmd: + if cmd == "a": # add residual connection + self.functors.add_module('add_res_connect_{}'.format(cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x)) + cur_a_len += 1 + elif cmd == "n": # add layer normalization + layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, + elementwise_affine=True, + eps=1e-5) + self.functors.add_module("layer_norm_%d" % cur_n_len, + layerNorm) + cur_n_len += 1 + + + elif cmd == "d": # add dropout + self.functors.add_module('add_drop_{}'.format(cur_d_len), + Lambda(lambda x: F.dropout( + x, p=dropout_rate) + if dropout_rate else x) + ) + cur_d_len += 1 + + def forward(self, x, residual=None): + for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors)): + if cmd == "a": + x = functor(x, residual) + else: + x = functor(x) + + return x + + +class PrepareEncoder(nn.Module): + def __init__(self, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate=0, + bos_idx=0, + word_emb_param_name=None, + pos_enc_param_name=None): + super(PrepareEncoder, self).__init__() + self.src_emb_dim = src_emb_dim + self.src_max_len = src_max_len + self.emb = torch.nn.Embedding( + num_embeddings=self.src_max_len, + embedding_dim=self.src_emb_dim, + sparse=True, + ) + self.dropout_rate = dropout_rate + + def forward(self, src_word, src_pos): + src_word_emb = src_word.type(torch.float32) + src_word_emb = self.src_emb_dim**0.5 * src_word_emb + src_pos = torch.squeeze(src_pos, dim=-1) + src_pos_enc = self.emb(src_pos.type(torch.int64)) + src_pos_enc.stop_gradient = True + enc_input = src_word_emb + src_pos_enc + if self.dropout_rate: + 
out = F.dropout( + enc_input, p=self.dropout_rate) + else: + out = enc_input + return out + + +class PrepareDecoder(nn.Module): + def __init__(self, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate=0, + bos_idx=0, + word_emb_param_name=None, + pos_enc_param_name=None): + super(PrepareDecoder, self).__init__() + self.src_emb_dim = src_emb_dim + """ + self.emb0 = Embedding(num_embeddings=src_vocab_size, + embedding_dim=src_emb_dim) + """ + self.emb0 = torch.nn.Embedding( + num_embeddings=src_vocab_size, + embedding_dim=self.src_emb_dim, + padding_idx=bos_idx, + ) + self.emb1 = torch.nn.Embedding( + num_embeddings=src_max_len, + embedding_dim=self.src_emb_dim, + ) + self.dropout_rate = dropout_rate + + def forward(self, src_word, src_pos): + src_word = torch.squeeze(src_word.type(torch.int64), dim=-1) + src_word_emb = self.emb0(src_word) + src_word_emb = self.src_emb_dim**0.5 * src_word_emb + src_pos = torch.squeeze(src_pos, dim=-1) + src_pos_enc = self.emb1(src_pos) + src_pos_enc.stop_gradient = True + enc_input = src_word_emb + src_pos_enc + if self.dropout_rate: + out = F.dropout( + enc_input, p=self.dropout_rate) + else: + out = enc_input + return out + + +class FFN(nn.Module): + """ + Feed-Forward Network + """ + + def __init__(self, d_inner_hid, d_model, dropout_rate): + super(FFN, self).__init__() + self.dropout_rate = dropout_rate + self.fc1 = torch.nn.Linear( + in_features=d_model, out_features=d_inner_hid) + self.fc2 = torch.nn.Linear( + in_features=d_inner_hid, out_features=d_model) + + def forward(self, x): + hidden = self.fc1(x) + hidden = F.relu(hidden) + if self.dropout_rate: + hidden = F.dropout( + hidden, p=self.dropout_rate) + out = self.fc2(hidden) + return out diff --git a/modules/pytorchocr/modeling/heads/sr_rensnet_transformer.py b/modules/pytorchocr/modeling/heads/sr_rensnet_transformer.py new file mode 100644 index 0000000..92d93d9 --- /dev/null +++ b/modules/pytorchocr/modeling/heads/sr_rensnet_transformer.py @@ -0,0 +1,419 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is refer from: +https://github.com/FudanVI/FudanOCR/blob/main/text-gestalt/loss/transformer_english_decomposition.py +""" +import copy +import math +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def subsequent_mask(size): + """Generate a square mask for the sequence. The masked positions are filled with float('-inf'). + Unmasked positions are filled with float(0.0). 
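+
+    Note: this is the standard causal (look-ahead) mask used by autoregressive
+    Transformer decoders. A hedged reference expression with the same intent
+    (illustrative only, not part of this patch) is:
+
+        torch.triu(torch.ones(1, size, size), diagonal=1) == 0
+
+    which is True exactly at the positions a query token is allowed to attend to.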
+ """ + mask = torch.ones(1, size, size, dtype=torch.float32) + mask_inf = torch.triu( + torch.full( + size=[1, size, size], fill_value=-np.inf, dtype=torch.float32), + diagonal=1) + mask = mask + mask_inf + padding_mask = torch.equal(mask, torch.Tensor(1).type(mask.dtype)) + return padding_mask + + + +def clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) + + +def attention(query, key, value, mask=None, dropout=None, attention_map=None): + d_k = query.shape[-1] + scores = torch.matmul(query, + key.transpose(-2, -1)) / math.sqrt(d_k) + + if mask is not None: + scores = scores.masked_fill(mask == 0, float('-inf')) + else: + pass + + p_attn = F.softmax(scores, dim=-1) + + if dropout is not None: + p_attn = dropout(p_attn) + return torch.matmul(p_attn, value), p_attn + + +class MultiHeadedAttention(nn.Module): + def __init__(self, h, d_model, dropout=0.1, compress_attention=False): + super(MultiHeadedAttention, self).__init__() + assert d_model % h == 0 + self.d_k = d_model // h + self.h = h + self.linears = clones(nn.Linear(d_model, d_model), 4) + self.attn = None + self.dropout = nn.Dropout(p=dropout) + self.compress_attention = compress_attention + self.compress_attention_linear = nn.Linear(h, 1) + + def forward(self, query, key, value, mask=None, attention_map=None): + if mask is not None: + mask = mask.unsqueeze(1) + nbatches = query.size(0) + + query, key, value = \ + [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) + for l, x in zip(self.linears, (query, key, value))] + + x, attention_map = attention( + query, + key, + value, + mask=mask, + dropout=self.dropout, + attention_map=attention_map) + + x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k) + + return self.linears[-1](x), attention_map + + +class ResNet(nn.Module): + def __init__(self, num_in, block, layers): + super(ResNet, self).__init__() + + self.conv1 = nn.Conv2d(num_in, 64, kernel_size=3, stride=1, padding=1) + self.bn1 = nn.BatchNorm2d(64) + self.relu1 = nn.ReLU(inplace=True) + self.pool = nn.MaxPool2d((2, 2), (2, 2)) + + self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) + self.bn2 = nn.BatchNorm2d(128) + self.relu2 = nn.ReLU(inplace=True) + + self.layer1_pool = nn.MaxPool2d((2, 2), (2, 2)) + self.layer1 = self._make_layer(block, 128, 256, layers[0]) + self.layer1_conv = nn.Conv2d(256, 256, 3, 1, 1) + self.layer1_bn = nn.BatchNorm2d(256) + self.layer1_relu = nn.ReLU(inplace=True) + + self.layer2_pool = nn.MaxPool2d((2, 2), (2, 2)) + self.layer2 = self._make_layer(block, 256, 256, layers[1]) + self.layer2_conv = nn.Conv2d(256, 256, 3, 1, 1) + self.layer2_bn = nn.BatchNorm2d(256) + self.layer2_relu = nn.ReLU(inplace=True) + + self.layer3_pool = nn.MaxPool2d((2, 2), (2, 2)) + self.layer3 = self._make_layer(block, 256, 512, layers[2]) + self.layer3_conv = nn.Conv2d(512, 512, 3, 1, 1) + self.layer3_bn = nn.BatchNorm2d(512) + self.layer3_relu = nn.ReLU(inplace=True) + + self.layer4_pool = nn.MaxPool2d((2, 2), (2, 2)) + self.layer4 = self._make_layer(block, 512, 512, layers[3]) + self.layer4_conv2 = nn.Conv2d(512, 1024, 3, 1, 1) + self.layer4_conv2_bn = nn.BatchNorm2d(1024) + self.layer4_conv2_relu = nn.ReLU(inplace=True) + + def _make_layer(self, block, inplanes, planes, blocks): + + if inplanes != planes: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes, 3, 1, 1), + nn.BatchNorm2d( + planes), ) + else: + downsample = None + layers = [] + layers.append(block(inplanes, planes, downsample)) + for i in range(1, blocks): + 
layers.append(block(planes, planes, downsample=None)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.pool(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu2(x) + + x = self.layer1_pool(x) + x = self.layer1(x) + x = self.layer1_conv(x) + x = self.layer1_bn(x) + x = self.layer1_relu(x) + + x = self.layer2(x) + x = self.layer2_conv(x) + x = self.layer2_bn(x) + x = self.layer2_relu(x) + + x = self.layer3(x) + x = self.layer3_conv(x) + x = self.layer3_bn(x) + x = self.layer3_relu(x) + + x = self.layer4(x) + x = self.layer4_conv2(x) + x = self.layer4_conv2_bn(x) + x = self.layer4_conv2_relu(x) + + return x + + +class Bottleneck(nn.Module): + def __init__(self, input_dim): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(input_dim, input_dim, 1) + self.bn1 = nn.BatchNorm2d(input_dim) + self.relu = nn.ReLU() + + self.conv2 = nn.Conv2d(input_dim, input_dim, 3, 1, 1) + self.bn2 = nn.BatchNorm2d(input_dim) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += residual + out = self.relu(out) + + return out + + +class PositionalEncoding(nn.Module): + "Implement the PE function." + + def __init__(self, dropout, dim, max_len=5000): + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p=dropout) + + pe = torch.zeros(max_len, dim) + position = torch.arange(0, max_len).unsqueeze(1).float() + div_term = torch.exp( + torch.arange(0, dim, 2).float() * + (-math.log(10000.0) / dim)) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, x): + x = x + self.pe[:, :x.size(1)] + return self.dropout(x) + + +class PositionwiseFeedForward(nn.Module): + "Implements FFN equation." + + def __init__(self, d_model, d_ff, dropout=0.1): + super(PositionwiseFeedForward, self).__init__() + self.w_1 = nn.Linear(d_model, d_ff) + self.w_2 = nn.Linear(d_ff, d_model) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + return self.w_2(self.dropout(F.relu(self.w_1(x)))) + + +class Generator(nn.Module): + "Define standard linear + softmax generation step." + + def __init__(self, d_model, vocab): + super(Generator, self).__init__() + self.proj = nn.Linear(d_model, vocab) + self.relu = nn.ReLU() + + def forward(self, x): + out = self.proj(x) + return out + + +class Embeddings(nn.Module): + def __init__(self, d_model, vocab): + super(Embeddings, self).__init__() + self.lut = nn.Embedding(vocab, d_model) + self.d_model = d_model + + def forward(self, x): + embed = self.lut(x) * math.sqrt(self.d_model) + return embed + + +class LayerNorm(nn.Module): + "Construct a layernorm module (See citation for details)." 
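+    #
+    # This mirrors the "Annotated Transformer" LayerNorm: a learnable gain
+    # a_2 and bias b_2 applied after normalizing over the last dimension.
+    # A hedged comparison (illustrative, not part of this patch):
+    #
+    #     ln = LayerNorm(8)
+    #     ref = F.layer_norm(torch.randn(2, 4, 8), (8,), ln.a_2, ln.b_2)
+    #
+    # The two are close but not bit-identical: this class divides by
+    # (std + eps) with an unbiased std, while F.layer_norm uses
+    # sqrt(biased variance + eps).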
+ + def __init__(self, features, eps=1e-6): + super(LayerNorm, self).__init__() + self.a_2 = nn.parameter.Parameter(torch.ones(features)) + self.b_2 = nn.parameter.Parameter(torch.zeros(features)) + self.eps = eps + + def forward(self, x): + mean = x.mean(-1, keepdim=True) + std = x.std(-1, keepdim=True) + return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 + + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + + self.mask_multihead = MultiHeadedAttention( + h=16, d_model=1024, dropout=0.1) + self.mul_layernorm1 = LayerNorm(1024) + + self.multihead = MultiHeadedAttention(h=16, d_model=1024, dropout=0.1) + self.mul_layernorm2 = LayerNorm(1024) + + self.pff = PositionwiseFeedForward(1024, 2048) + self.mul_layernorm3 = LayerNorm(1024) + + def forward(self, text, conv_feature, attention_map=None): + text_max_length = text.shape[1] + mask = subsequent_mask(text_max_length) + result = text + result = self.mul_layernorm1(result + self.mask_multihead( + text, text, text, mask=mask)[0]) + b, c, h, w = conv_feature.shape + conv_feature = conv_feature.view(b, c, h * w).permute(0, 2, 1).contiguous() + word_image_align, attention_map = self.multihead( + result, + conv_feature, + conv_feature, + mask=None, + attention_map=attention_map) + result = self.mul_layernorm2(result + word_image_align) + result = self.mul_layernorm3(result + self.pff(result)) + + return result, attention_map + + +class BasicBlock(nn.Module): + def __init__(self, inplanes, planes, downsample): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=1, padding=1) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU() + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=1) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample != None: + residual = self.downsample(residual) + + out += residual + out = self.relu(out) + + return out + + +class Encoder(nn.Module): + def __init__(self): + super(Encoder, self).__init__() + self.cnn = ResNet(num_in=1, block=BasicBlock, layers=[1, 2, 5, 3]) + + def forward(self, input): + conv_result = self.cnn(input) + return conv_result + + +class Transformer(nn.Module): + def __init__(self, in_channels=1, alphabet='0123456789'): + super(Transformer, self).__init__() + self.alphabet = alphabet + word_n_class = self.get_alphabet_len() + self.embedding_word_with_upperword = Embeddings(512, word_n_class) + self.pe = PositionalEncoding(dim=512, dropout=0.1, max_len=5000) + + self.encoder = Encoder() + self.decoder = Decoder() + self.generator_word_with_upperword = Generator(1024, word_n_class) + + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def get_alphabet_len(self): + return len(self.alphabet) + + def forward(self, image, text_length, text_input, attention_map=None): + if image.shape[1] == 3: + R = image[:, 0:1, :, :] + G = image[:, 1:2, :, :] + B = image[:, 2:3, :, :] + image = 0.299 * R + 0.587 * G + 0.114 * B + + conv_feature = self.encoder(image) # batch, 1024, 8, 32 + max_length = max(text_length) + text_input = text_input[:, :max_length] + + text_embedding = self.embedding_word_with_upperword( + text_input) # batch, text_max_length, 512 + if torch.cuda.is_available(): + postion_embedding = self.pe( + torch.zeros(text_embedding.shape).cuda()).cuda() + else: + 
postion_embedding = self.pe( + torch.zeros(text_embedding.shape)) # batch, text_max_length, 512 + text_input_with_pe = torch.cat([text_embedding, postion_embedding], 2) # batch, text_max_length, 1024 + batch, seq_len, _ = text_input_with_pe.shape + + text_input_with_pe, word_attention_map = self.decoder( + text_input_with_pe, conv_feature) + + word_decoder_result = self.generator_word_with_upperword( + text_input_with_pe) + + if self.training: + total_length = torch.sum(text_length).data + probs_res = torch.zeros([total_length, self.get_alphabet_len()]).type_as(word_decoder_result.data) + start = 0 + + for index, length in enumerate(text_length): + length = int(length.numpy()) + probs_res[start:start + length, :] = word_decoder_result[ + index, 0:0 + length, :] + + start = start + length + + return probs_res, word_attention_map, None + else: + return word_decoder_result diff --git a/modules/pytorchocr/modeling/heads/table_att_head.py b/modules/pytorchocr/modeling/heads/table_att_head.py new file mode 100644 index 0000000..a2a97ee --- /dev/null +++ b/modules/pytorchocr/modeling/heads/table_att_head.py @@ -0,0 +1,207 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + + +class TableAttentionHead(nn.Module): + def __init__(self, in_channels, hidden_size, loc_type, in_max_len=488, **kwargs): + super(TableAttentionHead, self).__init__() + self.input_size = in_channels[-1] + self.hidden_size = hidden_size + self.elem_num = 30 + self.max_text_length = 100 + self.max_elem_length = kwargs.get('max_elem_length', 500) + self.max_cell_num = 500 + + self.structure_attention_cell = AttentionGRUCell( + self.input_size, hidden_size, self.elem_num, use_gru=False) + self.structure_generator = nn.Linear(hidden_size, self.elem_num) + self.loc_type = loc_type + self.in_max_len = in_max_len + + if self.loc_type == 1: + self.loc_generator = nn.Linear(hidden_size, 4) + else: + if self.in_max_len == 640: + self.loc_fea_trans = nn.Linear(400, self.max_elem_length + 1) + elif self.in_max_len == 800: + self.loc_fea_trans = nn.Linear(625, self.max_elem_length + 1) + else: + self.loc_fea_trans = nn.Linear(256, self.max_elem_length + 1) + self.loc_generator = nn.Linear(self.input_size + hidden_size, 4) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char.type(torch.int64), onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None): + # if and else branch are both needed when you want to assign a variable + # if you modify the var in just one branch, then the modification will not work. 
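+        # Shape walk-through (hedged; exact sizes depend on the backbone):
+        # inputs[-1] is the last feature map of shape (N, C, H, W); it is
+        # flattened to (N, C, H*W) and permuted to (N, H*W, C) so the GRU
+        # attention cell below can attend over the H*W spatial positions.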
+ fea = inputs[-1] + if len(fea.shape) == 3: + pass + else: + last_shape = int(np.prod(fea.shape[2:])) # gry added + fea = torch.reshape(fea, [fea.shape[0], fea.shape[1], last_shape]) + # fea = fea.transpose([0, 2, 1]) # (NTC)(batch, width, channels) + fea = fea.permute(0, 2, 1) + batch_size = fea.shape[0] + + hidden = torch.zeros((batch_size, self.hidden_size)) + output_hiddens = [] + if self.training and targets is not None: + raise NotImplementedError + else: + temp_elem = torch.zeros([batch_size], dtype=torch.int32) + structure_probs = None + loc_preds = None + elem_onehots = None + outputs = None + alpha = None + max_elem_length = torch.as_tensor(self.max_elem_length) + i = 0 + while i < max_elem_length + 1: + elem_onehots = self._char_to_onehot( + temp_elem, onehot_dim=self.elem_num) + (outputs, hidden), alpha = self.structure_attention_cell( + hidden, fea, elem_onehots) + output_hiddens.append(torch.unsqueeze(outputs, dim=1)) + structure_probs_step = self.structure_generator(outputs) + temp_elem = structure_probs_step.argmax(dim=1, keepdim=False) + i += 1 + + output = torch.cat(output_hiddens, dim=1) + structure_probs = self.structure_generator(output) + structure_probs = F.softmax(structure_probs, dim=-1) + if self.loc_type == 1: + loc_preds = self.loc_generator(output) + loc_preds = F.sigmoid(loc_preds) + else: + loc_fea = fea.permute(0, 2, 1) + loc_fea = self.loc_fea_trans(loc_fea) + loc_fea = loc_fea.permute(0, 2, 1) + loc_concat = torch.cat([output, loc_fea], dim=2) + loc_preds = self.loc_generator(loc_concat) + loc_preds = F.sigmoid(loc_preds) + return {'structure_probs': structure_probs, 'loc_preds': loc_preds} + + +class AttentionGRUCell(nn.Module): + def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionGRUCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias=False) + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden), dim=1) + res = torch.add(batch_H_proj, prev_hidden_proj) + res = torch.tanh(res) + e = self.score(res) + alpha = F.softmax(e, dim=1) + alpha = alpha.permute(0, 2, 1) + context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1) + concat_context = torch.cat([context, char_onehots.float()], 1) + cur_hidden = self.rnn(concat_context, prev_hidden) + return (cur_hidden, cur_hidden), alpha + + +class AttentionLSTM(nn.Module): + def __init__(self, in_channels, out_channels, hidden_size, **kwargs): + super(AttentionLSTM, self).__init__() + self.input_size = in_channels + self.hidden_size = hidden_size + self.num_classes = out_channels + + self.attention_cell = AttentionLSTMCell( + in_channels, hidden_size, out_channels, use_gru=False) + self.generator = nn.Linear(hidden_size, out_channels) + + def _char_to_onehot(self, input_char, onehot_dim): + input_ont_hot = F.one_hot(input_char, onehot_dim) + return input_ont_hot + + def forward(self, inputs, targets=None, batch_max_length=25): + batch_size = inputs.shape[0] + num_steps = batch_max_length + + hidden = (torch.zeros((batch_size, self.hidden_size)), torch.zeros( + (batch_size, self.hidden_size))) + output_hiddens = [] + + if targets is not None: + for i in range(num_steps): + # one-hot vectors for a i-th char + char_onehots = 
self._char_to_onehot( + targets[:, i], onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + + hidden = (hidden[1][0], hidden[1][1]) + output_hiddens.append(torch.unsqueeze(hidden[0], dim=1)) + output = torch.cat(output_hiddens, dim=1) + probs = self.generator(output) + + else: + targets = torch.zeros([batch_size], dtype=torch.int32) + probs = None + + for i in range(num_steps): + char_onehots = self._char_to_onehot( + targets, onehot_dim=self.num_classes) + hidden, alpha = self.attention_cell(hidden, inputs, + char_onehots) + probs_step = self.generator(hidden[0]) + hidden = (hidden[1][0], hidden[1][1]) + if probs is None: + probs = torch.unsqueeze(probs_step, dim=1) + else: + probs = torch.cat( + [probs, torch.unsqueeze( + probs_step, dim=1)], dim=1) + + next_input = probs_step.argmax(dim=1) + + targets = next_input + + return probs + + +class AttentionLSTMCell(nn.Module): + def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): + super(AttentionLSTMCell, self).__init__() + self.i2h = nn.Linear(input_size, hidden_size, bias=False) + self.h2h = nn.Linear(hidden_size, hidden_size) + self.score = nn.Linear(hidden_size, 1, bias=False) + if not use_gru: + self.rnn = nn.LSTMCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + else: + self.rnn = nn.GRUCell( + input_size=input_size + num_embeddings, hidden_size=hidden_size) + + self.hidden_size = hidden_size + + def forward(self, prev_hidden, batch_H, char_onehots): + batch_H_proj = self.i2h(batch_H) + prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden[0]), dim=1) + res = torch.add(batch_H_proj, prev_hidden_proj) + res = torch.tanh(res) + e = self.score(res) + + alpha = F.softmax(e, dim=1) + alpha = alpha.permute(0, 2, 1) + context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1) + concat_context = torch.cat([context, char_onehots.float()], 1) + cur_hidden = self.rnn(concat_context, prev_hidden) + + return (cur_hidden, cur_hidden), alpha \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/__init__.py b/modules/pytorchocr/modeling/necks/__init__.py new file mode 100644 index 0000000..e525810 --- /dev/null +++ b/modules/pytorchocr/modeling/necks/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
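+
+# Hedged usage sketch (the channel numbers are illustrative, not prescribed
+# by this module):
+#
+#     from pytorchocr.modeling.necks import build_neck
+#     neck = build_neck({'name': 'DBFPN',
+#                        'in_channels': [16, 24, 56, 480],
+#                        'out_channels': 256})
+#
+# build_neck pops 'name' from the config dict and forwards the remaining
+# keys as keyword arguments to the selected neck class.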
+ +__all__ = ['build_neck'] + +def build_neck(config): + from .db_fpn import DBFPN, RSEFPN, LKPAN + from .east_fpn import EASTFPN + from .sast_fpn import SASTFPN + from .rnn import SequenceEncoder + from .pg_fpn import PGFPN + from .fpn import FPN + from .fce_fpn import FCEFPN + from .table_fpn import TableFPN + support_dict = ['FPN', 'DBFPN', 'EASTFPN', 'SASTFPN', 'SequenceEncoder', 'PGFPN', 'TableFPN', + 'RSEFPN', 'LKPAN', 'FCEFPN'] + + module_name = config.pop('name') + assert module_name in support_dict, Exception('neck only support {}'.format( + support_dict)) + module_class = eval(module_name)(**config) + return module_class \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/db_fpn.py b/modules/pytorchocr/modeling/necks/db_fpn.py new file mode 100644 index 0000000..6d98ad0 --- /dev/null +++ b/modules/pytorchocr/modeling/necks/db_fpn.py @@ -0,0 +1,414 @@ +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.backbones.det_mobilenet_v3 import SEModule +from pytorchocr.modeling.necks.intracl import IntraCLBlock + +def hard_swish(x, inplace=True): + return x * F.relu6(x + 3., inplace=inplace) / 6. + +class DSConv(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + padding, + stride=1, + groups=None, + if_act=True, + act="relu", + **kwargs): + super(DSConv, self).__init__() + if groups == None: + groups = in_channels + self.if_act = if_act + self.act = act + self.conv1 = nn.Conv2d( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + + self.bn1 = nn.BatchNorm2d(in_channels) + + self.conv2 = nn.Conv2d( + in_channels=in_channels, + out_channels=int(in_channels * 4), + kernel_size=1, + stride=1, + bias=False) + + self.bn2 = nn.BatchNorm2d(int(in_channels * 4)) + + self.conv3 = nn.Conv2d( + in_channels=int(in_channels * 4), + out_channels=out_channels, + kernel_size=1, + stride=1, + bias=False) + self._c = [in_channels, out_channels] + if in_channels != out_channels: + self.conv_end = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + bias=False) + + def forward(self, inputs): + + x = self.conv1(inputs) + x = self.bn1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.if_act: + if self.act == "relu": + x = F.relu(x) + elif self.act == "hardswish": + x = hard_swish(x) + else: + print("The activation function({}) is selected incorrectly.". 
+ format(self.act)) + exit() + + x = self.conv3(x) + if self._c[0] != self._c[1]: + x = x + self.conv_end(inputs) + return x + + +class DBFPN(nn.Module): + def __init__(self, in_channels, out_channels, use_asf=False, **kwargs): + super(DBFPN, self).__init__() + self.out_channels = out_channels + self.use_asf = use_asf + + self.in2_conv = nn.Conv2d( + in_channels=in_channels[0], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.in3_conv = nn.Conv2d( + in_channels=in_channels[1], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.in4_conv = nn.Conv2d( + in_channels=in_channels[2], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.in5_conv = nn.Conv2d( + in_channels=in_channels[3], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.p5_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p4_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p3_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p2_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + + if self.use_asf is True: + self.asf = ASFBlock(self.out_channels, self.out_channels // 4) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.in5_conv(c5) + in4 = self.in4_conv(c4) + in3 = self.in3_conv(c3) + in2 = self.in2_conv(c2) + + out4 = in4 + F.interpolate( + in5, scale_factor=2, mode="nearest", )#align_mode=1) # 1/16 + out3 = in3 + F.interpolate( + out4, scale_factor=2, mode="nearest", )#align_mode=1) # 1/8 + out2 = in2 + F.interpolate( + out3, scale_factor=2, mode="nearest", )#align_mode=1) # 1/4 + + p5 = self.p5_conv(in5) + p4 = self.p4_conv(out4) + p3 = self.p3_conv(out3) + p2 = self.p2_conv(out2) + p5 = F.interpolate(p5, scale_factor=8, mode="nearest", )#align_mode=1) + p4 = F.interpolate(p4, scale_factor=4, mode="nearest", )#align_mode=1) + p3 = F.interpolate(p3, scale_factor=2, mode="nearest", )#align_mode=1) + + fuse = torch.cat([p5, p4, p3, p2], dim=1) + + if self.use_asf is True: + fuse = self.asf(fuse, [p5, p4, p3, p2]) + + return fuse + + +class RSELayer(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut=True): + super(RSELayer, self).__init__() + self.out_channels = out_channels + self.in_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + padding=int(kernel_size // 2), + bias=False) + self.se_block = SEModule(self.out_channels) + self.shortcut = shortcut + + def forward(self, ins): + x = self.in_conv(ins) + if self.shortcut: + out = x + self.se_block(x) + else: + out = self.se_block(x) + return out + + +class RSEFPN(nn.Module): + def __init__(self, in_channels, out_channels, shortcut=True, **kwargs): + super(RSEFPN, self).__init__() + self.out_channels = out_channels + self.ins_conv = nn.ModuleList() + self.inp_conv = nn.ModuleList() + self.intracl = False + if 'intracl' in kwargs.keys() and kwargs['intracl'] is True: + self.intracl = kwargs['intracl'] + self.incl1 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl2 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl3 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl4 = IntraCLBlock(self.out_channels // 
4, reduce_factor=2) + + for i in range(len(in_channels)): + self.ins_conv.append( + RSELayer( + in_channels[i], + out_channels, + kernel_size=1, + shortcut=shortcut)) + self.inp_conv.append( + RSELayer( + out_channels, + out_channels // 4, + kernel_size=3, + shortcut=shortcut)) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.ins_conv[3](c5) + in4 = self.ins_conv[2](c4) + in3 = self.ins_conv[1](c3) + in2 = self.ins_conv[0](c2) + + out4 = in4 + F.upsample( + in5, scale_factor=2, mode="nearest") # 1/16 + out3 = in3 + F.upsample( + out4, scale_factor=2, mode="nearest") # 1/8 + out2 = in2 + F.upsample( + out3, scale_factor=2, mode="nearest") # 1/4 + + p5 = self.inp_conv[3](in5) + p4 = self.inp_conv[2](out4) + p3 = self.inp_conv[1](out3) + p2 = self.inp_conv[0](out2) + + if self.intracl is True: + p5 = self.incl4(p5) + p4 = self.incl3(p4) + p3 = self.incl2(p3) + p2 = self.incl1(p2) + + p5 = F.upsample(p5, scale_factor=8, mode="nearest") + p4 = F.upsample(p4, scale_factor=4, mode="nearest") + p3 = F.upsample(p3, scale_factor=2, mode="nearest") + + fuse = torch.cat([p5, p4, p3, p2], dim=1) + return fuse + + +class LKPAN(nn.Module): + def __init__(self, in_channels, out_channels, mode='large', **kwargs): + super(LKPAN, self).__init__() + self.out_channels = out_channels + + self.ins_conv = nn.ModuleList() + self.inp_conv = nn.ModuleList() + # pan head + self.pan_head_conv = nn.ModuleList() + self.pan_lat_conv = nn.ModuleList() + + if mode.lower() == 'lite': + p_layer = DSConv + elif mode.lower() == 'large': + p_layer = nn.Conv2d + else: + raise ValueError( + "mode can only be one of ['lite', 'large'], but received {}". + format(mode)) + + for i in range(len(in_channels)): + self.ins_conv.append( + nn.Conv2d( + in_channels=in_channels[i], + out_channels=self.out_channels, + kernel_size=1, + bias=False)) + + self.inp_conv.append( + p_layer( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=9, + padding=4, + bias=False)) + + if i > 0: + self.pan_head_conv.append( + nn.Conv2d( + in_channels=self.out_channels // 4, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + stride=2, + bias=False)) + self.pan_lat_conv.append( + p_layer( + in_channels=self.out_channels // 4, + out_channels=self.out_channels // 4, + kernel_size=9, + padding=4, + bias=False)) + self.intracl = False + if 'intracl' in kwargs.keys() and kwargs['intracl'] is True: + self.intracl = kwargs['intracl'] + self.incl1 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl2 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl3 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + self.incl4 = IntraCLBlock(self.out_channels // 4, reduce_factor=2) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.ins_conv[3](c5) + in4 = self.ins_conv[2](c4) + in3 = self.ins_conv[1](c3) + in2 = self.ins_conv[0](c2) + + out4 = in4 + F.upsample( + in5, scale_factor=2, mode="nearest") # 1/16 + out3 = in3 + F.upsample( + out4, scale_factor=2, mode="nearest") # 1/8 + out2 = in2 + F.upsample( + out3, scale_factor=2, mode="nearest") # 1/4 + + f5 = self.inp_conv[3](in5) + f4 = self.inp_conv[2](out4) + f3 = self.inp_conv[1](out3) + f2 = self.inp_conv[0](out2) + + pan3 = f3 + self.pan_head_conv[0](f2) + pan4 = f4 + self.pan_head_conv[1](pan3) + pan5 = f5 + self.pan_head_conv[2](pan4) + + p2 = self.pan_lat_conv[0](f2) + p3 = self.pan_lat_conv[1](pan3) + p4 = self.pan_lat_conv[2](pan4) + p5 = self.pan_lat_conv[3](pan5) + + if self.intracl is True: + p5 = 
self.incl4(p5) + p4 = self.incl3(p4) + p3 = self.incl2(p3) + p2 = self.incl1(p2) + + p5 = F.upsample(p5, scale_factor=8, mode="nearest") + p4 = F.upsample(p4, scale_factor=4, mode="nearest") + p3 = F.upsample(p3, scale_factor=2, mode="nearest") + + fuse = torch.cat([p5, p4, p3, p2], dim=1) + return fuse + + +class ASFBlock(nn.Module): + """ + This code is refered from: + https://github.com/MhLiao/DB/blob/master/decoders/feature_attention.py + """ + + def __init__(self, in_channels, inter_channels, out_features_num=4): + """ + Adaptive Scale Fusion (ASF) block of DBNet++ + Args: + in_channels: the number of channels in the input data + inter_channels: the number of middle channels + out_features_num: the number of fused stages + """ + super(ASFBlock, self).__init__() + self.in_channels = in_channels + self.inter_channels = inter_channels + self.out_features_num = out_features_num + self.conv = nn.Conv2d(in_channels, inter_channels, 3, padding=1) + + self.spatial_scale = nn.Sequential( + #Nx1xHxW + nn.Conv2d( + in_channels=1, + out_channels=1, + kernel_size=3, + bias=False, + padding=1, + ), + nn.ReLU(), + nn.Conv2d( + in_channels=1, + out_channels=1, + kernel_size=1, + bias=False, + ), + nn.Sigmoid()) + + self.channel_scale = nn.Sequential( + nn.Conv2d( + in_channels=inter_channels, + out_channels=out_features_num, + kernel_size=1, + bias=False, + ), + nn.Sigmoid()) + + def forward(self, fuse_features, features_list): + fuse_features = self.conv(fuse_features) + spatial_x = torch.mean(fuse_features, dim=1, keepdim=True) + attention_scores = self.spatial_scale(spatial_x) + fuse_features + attention_scores = self.channel_scale(attention_scores) + assert len(features_list) == self.out_features_num + + out_list = [] + for i in range(self.out_features_num): + out_list.append(attention_scores[:, i:i + 1] * features_list[i]) + return torch.cat(out_list, dim=1) diff --git a/modules/pytorchocr/modeling/necks/east_fpn.py b/modules/pytorchocr/modeling/necks/east_fpn.py new file mode 100644 index 0000000..e5fdac4 --- /dev/null +++ b/modules/pytorchocr/modeling/necks/east_fpn.py @@ -0,0 +1,184 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import nn +# import paddle.nn.functional as F +# from paddle import ParamAttr + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels,) + self.act = act + if act is not None: + self._act = Activation(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class DeConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=1, + if_act=True, + act=None, + name=None): + super(DeConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + + self.deconv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + 
padding=padding, + groups=groups, + bias=False) + self.bn = nn.BatchNorm2d( + out_channels, + ) + self.act = act + if act is not None: + self._act = Activation(act) + + + def forward(self, x): + x = self.deconv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class EASTFPN(nn.Module): + def __init__(self, in_channels, model_name, **kwargs): + super(EASTFPN, self).__init__() + self.model_name = model_name + if self.model_name == "large": + self.out_channels = 128 + else: + self.out_channels = 64 + self.in_channels = in_channels[::-1] + self.h1_conv = ConvBNLayer( + in_channels=self.out_channels+self.in_channels[1], + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="unet_h_1") + self.h2_conv = ConvBNLayer( + in_channels=self.out_channels+self.in_channels[2], + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="unet_h_2") + self.h3_conv = ConvBNLayer( + in_channels=self.out_channels+self.in_channels[3], + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="unet_h_3") + self.g0_deconv = DeConvBNLayer( + in_channels=self.in_channels[0], + out_channels=self.out_channels, + kernel_size=4, + stride=2, + padding=1, + if_act=True, + act='relu', + name="unet_g_0") + self.g1_deconv = DeConvBNLayer( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=4, + stride=2, + padding=1, + if_act=True, + act='relu', + name="unet_g_1") + self.g2_deconv = DeConvBNLayer( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=4, + stride=2, + padding=1, + if_act=True, + act='relu', + name="unet_g_2") + self.g3_conv = ConvBNLayer( + in_channels=self.out_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + padding=1, + if_act=True, + act='relu', + name="unet_g_3") + + def forward(self, x): + f = x[::-1] + + h = f[0] + g = self.g0_deconv(h) + # h = paddle.concat([g, f[1]], axis=1) + h = torch.cat([g, f[1]], dim=1) + h = self.h1_conv(h) + g = self.g1_deconv(h) + # h = paddle.concat([g, f[2]], axis=1) + h = torch.cat([g, f[2]], dim=1) + h = self.h2_conv(h) + g = self.g2_deconv(h) + # h = paddle.concat([g, f[3]], axis=1) + h = torch.cat([g, f[3]], dim=1) + h = self.h3_conv(h) + g = self.g3_conv(h) + + return g \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/fce_fpn.py b/modules/pytorchocr/modeling/necks/fce_fpn.py new file mode 100644 index 0000000..8111c9b --- /dev/null +++ b/modules/pytorchocr/modeling/necks/fce_fpn.py @@ -0,0 +1,323 @@ +""" +This code is refer from: +https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.3/ppdet/modeling/necks/fpn.py +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.init import xavier_normal_ +from torch.nn.init import xavier_uniform_ +# import paddle.nn as nn +# import paddle.nn.functional as F +# from paddle import ParamAttr +# from paddle.nn.initializer import XavierUniform +# from paddle.nn.initializer import Normal +# from paddle.regularizer import L2Decay + +__all__ = ['FCEFPN'] + + +class ConvNormLayer(nn.Module): + def __init__(self, + ch_in, + ch_out, + filter_size, + stride, + groups=1, + norm_type='bn', + norm_decay=0., + norm_groups=32, + lr_scale=1., + freeze_norm=False, + initializer=None): + super(ConvNormLayer, self).__init__() + assert norm_type in ['bn', 'sync_bn', 'gn'] + + bias_attr = False + + self.conv 
= nn.Conv2d( + in_channels=ch_in, + out_channels=ch_out, + kernel_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + bias=bias_attr) + + norm_lr = 0. if freeze_norm else 1. + # param_attr = ParamAttr( + # learning_rate=norm_lr, + # regularizer=L2Decay(norm_decay) if norm_decay is not None else None) + # bias_attr = ParamAttr( + # learning_rate=norm_lr, + # regularizer=L2Decay(norm_decay) if norm_decay is not None else None) + if norm_type == 'bn': + self.norm = nn.BatchNorm2d( + ch_out, + ) + elif norm_type == 'sync_bn': + self.norm = nn.SyncBatchNorm( + ch_out, + ) + elif norm_type == 'gn': + self.norm = nn.GroupNorm( + num_groups=norm_groups, + num_channels=ch_out, + affine=bias_attr) + + def forward(self, inputs): + out = self.conv(inputs) + out = self.norm(out) + return out + + +class FCEFPN(nn.Module): + """ + Feature Pyramid Network, see https://arxiv.org/abs/1612.03144 + Args: + in_channels (list[int]): input channels of each level which can be + derived from the output shape of backbone by from_config + out_channels (list[int]): output channel of each level + spatial_scales (list[float]): the spatial scales between input feature + maps and original input image which can be derived from the output + shape of backbone by from_config + has_extra_convs (bool): whether to add extra conv to the last level. + default False + extra_stage (int): the number of extra stages added to the last level. + default 1 + use_c5 (bool): Whether to use c5 as the input of extra stage, + otherwise p5 is used. default True + norm_type (string|None): The normalization type in FPN module. If + norm_type is None, norm will not be used after conv and if + norm_type is string, bn, gn, sync_bn are available. default None + norm_decay (float): weight decay for normalization layer weights. + default 0. + freeze_norm (bool): whether to freeze normalization layer. + default False + relu_before_extra_convs (bool): whether to add relu before extra convs. + default False + + """ + + def __init__(self, + in_channels, + out_channels, + spatial_scales=[0.25, 0.125, 0.0625, 0.03125], + has_extra_convs=False, + extra_stage=1, + use_c5=True, + norm_type=None, + norm_decay=0., + freeze_norm=False, + relu_before_extra_convs=True): + super(FCEFPN, self).__init__() + self.out_channels = out_channels + for s in range(extra_stage): + spatial_scales = spatial_scales + [spatial_scales[-1] / 2.] 
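+        # Each extra stage halves the previous spatial scale (i.e. doubles
+        # the stride): the default [0.25, 0.125, 0.0625, 0.03125] with
+        # extra_stage=1 gains a 0.015625 (1/64) level.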
+ self.spatial_scales = spatial_scales + self.has_extra_convs = has_extra_convs + self.extra_stage = extra_stage + self.use_c5 = use_c5 + self.relu_before_extra_convs = relu_before_extra_convs + self.norm_type = norm_type + self.norm_decay = norm_decay + self.freeze_norm = freeze_norm + + self.lateral_convs = []#nn.ModuleList() + self.lateral_convs_module = nn.ModuleList() + self.fpn_convs = []#nn.ModuleList() + self.fpn_convs_module = nn.ModuleList() + fan = out_channels * 3 * 3 + + # stage index 0,1,2,3 stands for res2,res3,res4,res5 on ResNet Backbone + # 0 <= st_stage < ed_stage <= 3 + st_stage = 4 - len(in_channels) + ed_stage = st_stage + len(in_channels) - 1 + for i in range(st_stage, ed_stage + 1): + if i == 3: + lateral_name = 'fpn_inner_res5_sum' + else: + lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2) + in_c = in_channels[i - st_stage] + if self.norm_type is not None: + # self.lateral_convs_module.add_module( + # lateral_name, + # ConvNormLayer( + # ch_in=in_c, + # ch_out=out_channels, + # filter_size=1, + # stride=1, + # norm_type=self.norm_type, + # norm_decay=self.norm_decay, + # freeze_norm=self.freeze_norm, + # initializer=None)) + lateral = ConvNormLayer( + ch_in=in_c, + ch_out=out_channels, + filter_size=1, + stride=1, + norm_type=self.norm_type, + norm_decay=self.norm_decay, + freeze_norm=self.freeze_norm, + initializer=None) + else: + # self.lateral_convs_module.add_module( + # lateral_name, + # nn.Conv2d( + # in_channels=in_c, + # out_channels=out_channels, + # kernel_size=1, + # ) + # ) + lateral = nn.Conv2d( + in_channels=in_c, + out_channels=out_channels, + kernel_size=1, + ) + self.lateral_convs_module.add_module(lateral_name, lateral) + self.lateral_convs.append(lateral) + + for i in range(st_stage, ed_stage + 1): + fpn_name = 'fpn_res{}_sum'.format(i + 2) + fpn_conv_module = nn.Sequential() + if self.norm_type is not None: + # fpn_conv_module.add_module( + # fpn_name, + # ConvNormLayer( + # ch_in=out_channels, + # ch_out=out_channels, + # filter_size=3, + # stride=1, + # norm_type=self.norm_type, + # norm_decay=self.norm_decay, + # freeze_norm=self.freeze_norm, + # initializer=None)) + fpn_conv = ConvNormLayer( + ch_in=out_channels, + ch_out=out_channels, + filter_size=3, + stride=1, + norm_type=self.norm_type, + norm_decay=self.norm_decay, + freeze_norm=self.freeze_norm, + initializer=None) + else: + # fpn_conv_module.add_module( + # fpn_name, + # nn.Conv2d( + # in_channels=out_channels, + # out_channels=out_channels, + # kernel_size=3, + # padding=1, + # ) + # ) + fpn_conv = nn.Conv2d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + ) + self.fpn_convs_module.add_module(fpn_name, fpn_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5) + if self.has_extra_convs: + for i in range(self.extra_stage): + lvl = ed_stage + 1 + i + if i == 0 and self.use_c5: + in_c = in_channels[-1] + else: + in_c = out_channels + extra_fpn_name = 'fpn_{}'.format(lvl + 2) + extra_fpn_conv_module = nn.Sequential() + if self.norm_type is not None: + # extra_fpn_conv_module.add_module( + # extra_fpn_name, + # ConvNormLayer( + # ch_in=in_c, + # ch_out=out_channels, + # filter_size=3, + # stride=2, + # norm_type=self.norm_type, + # norm_decay=self.norm_decay, + # freeze_norm=self.freeze_norm, + # initializer=None)) + extra_fpn_conv = ConvNormLayer( + ch_in=in_c, + ch_out=out_channels, + filter_size=3, + stride=2, + norm_type=self.norm_type, + norm_decay=self.norm_decay, + 
freeze_norm=self.freeze_norm, + initializer=None) + else: + # extra_fpn_conv_module.add_module( + # extra_fpn_name, + # nn.Conv2d( + # in_channels=in_c, + # out_channels=out_channels, + # kernel_size=3, + # stride=2, + # padding=1, + # ) + # ) + extra_fpn_conv = nn.Conv2d( + in_channels=in_c, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + ) + + self.fpn_convs_module.add_module(extra_fpn_name, extra_fpn_conv) + self.fpn_convs.append(extra_fpn_conv) + + @classmethod + def from_config(cls, cfg, input_shape): + return { + 'in_channels': [i.channels for i in input_shape], + 'spatial_scales': [1.0 / i.stride for i in input_shape], + } + + def forward(self, body_feats): + laterals = [] + num_levels = len(body_feats) + + for i in range(num_levels): + laterals.append(self.lateral_convs[i](body_feats[i])) + + for i in range(1, num_levels): + lvl = num_levels - i + upsample = F.interpolate( + laterals[lvl], + scale_factor=2., + mode='nearest', ) + laterals[lvl - 1] += upsample + + fpn_output = [] + for lvl in range(num_levels): + fpn_output.append(self.fpn_convs[lvl](laterals[lvl])) + + if self.extra_stage > 0: + # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN) + if not self.has_extra_convs: + assert self.extra_stage == 1, 'extra_stage should be 1 if FPN has not extra convs' + fpn_output.append(torch.max_pool2d(fpn_output[-1], 1, stride=2)) + # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5) + else: + if self.use_c5: + extra_source = body_feats[-1] + else: + extra_source = fpn_output[-1] + fpn_output.append(self.fpn_convs[num_levels](extra_source)) + + for i in range(1, self.extra_stage): + if self.relu_before_extra_convs: + fpn_output.append(self.fpn_convs[num_levels + i](F.relu( + fpn_output[-1]))) + else: + fpn_output.append(self.fpn_convs[num_levels + i]( + fpn_output[-1])) + return fpn_output diff --git a/modules/pytorchocr/modeling/necks/fpn.py b/modules/pytorchocr/modeling/necks/fpn.py new file mode 100644 index 0000000..df1097f --- /dev/null +++ b/modules/pytorchocr/modeling/necks/fpn.py @@ -0,0 +1,109 @@ +""" +This code is refer from: +https://github.com/whai362/PSENet/blob/python3/models/neck/fpn.py +""" + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + + + +class Conv_BN_ReLU(nn.Module): + def __init__(self, + in_planes, + out_planes, + kernel_size=1, + stride=1, + padding=0): + super(Conv_BN_ReLU, self).__init__() + self.conv = nn.Conv2d( + in_planes, + out_planes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=False) + self.bn = nn.BatchNorm2d(out_planes, momentum=0.1) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + + def forward(self, x): + return self.relu(self.bn(self.conv(x))) + + +class FPN(nn.Module): + def __init__(self, in_channels, out_channels): + super(FPN, self).__init__() + + # Top layer + self.toplayer_ = Conv_BN_ReLU( + in_channels[3], out_channels, kernel_size=1, stride=1, padding=0) + # Lateral layers + self.latlayer1_ = Conv_BN_ReLU( + in_channels[2], out_channels, kernel_size=1, stride=1, padding=0) + + self.latlayer2_ = Conv_BN_ReLU( + in_channels[1], out_channels, kernel_size=1, stride=1, padding=0) + + self.latlayer3_ = Conv_BN_ReLU( + in_channels[0], out_channels, kernel_size=1, stride=1, padding=0) + + # Smooth 
layers + self.smooth1_ = Conv_BN_ReLU( + out_channels, out_channels, kernel_size=3, stride=1, padding=1) + + self.smooth2_ = Conv_BN_ReLU( + out_channels, out_channels, kernel_size=3, stride=1, padding=1) + + self.smooth3_ = Conv_BN_ReLU( + out_channels, out_channels, kernel_size=3, stride=1, padding=1) + + self.out_channels = out_channels * 4 + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def _upsample(self, x, scale=1): + return F.upsample(x, scale_factor=scale, mode='bilinear') + + def _upsample_add(self, x, y, scale=1): + return F.upsample(x, scale_factor=scale, mode='bilinear') + y + + def forward(self, x): + f2, f3, f4, f5 = x + p5 = self.toplayer_(f5) + + f4 = self.latlayer1_(f4) + p4 = self._upsample_add(p5, f4, 2) + p4 = self.smooth1_(p4) + + f3 = self.latlayer2_(f3) + p3 = self._upsample_add(p4, f3, 2) + p3 = self.smooth2_(p3) + + f2 = self.latlayer3_(f2) + p2 = self._upsample_add(p3, f2, 2) + p2 = self.smooth3_(p2) + + p3 = self._upsample(p3, 2) + p4 = self._upsample(p4, 4) + p5 = self._upsample(p5, 8) + + fuse = torch.cat([p2, p3, p4, p5], dim=1) + return fuse diff --git a/modules/pytorchocr/modeling/necks/intracl.py b/modules/pytorchocr/modeling/necks/intracl.py new file mode 100644 index 0000000..414b723 --- /dev/null +++ b/modules/pytorchocr/modeling/necks/intracl.py @@ -0,0 +1,114 @@ +from torch import nn + + +class IntraCLBlock(nn.Module): + def __init__(self, in_channels=96, reduce_factor=4): + super(IntraCLBlock, self).__init__() + self.channels = in_channels + self.rf = reduce_factor + self.conv1x1_reduce_channel = nn.Conv2d( + self.channels, + self.channels // self.rf, + kernel_size=1, + stride=1, + padding=0) + self.conv1x1_return_channel = nn.Conv2d( + self.channels // self.rf, + self.channels, + kernel_size=1, + stride=1, + padding=0) + + self.v_layer_7x1 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(7, 1), + stride=(1, 1), + padding=(3, 0)) + self.v_layer_5x1 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(5, 1), + stride=(1, 1), + padding=(2, 0)) + self.v_layer_3x1 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(3, 1), + stride=(1, 1), + padding=(1, 0)) + + self.q_layer_1x7 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(1, 7), + stride=(1, 1), + padding=(0, 3)) + self.q_layer_1x5 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(1, 5), + stride=(1, 1), + padding=(0, 2)) + self.q_layer_1x3 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(1, 3), + stride=(1, 1), + padding=(0, 1)) + + # base + self.c_layer_7x7 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(7, 7), + stride=(1, 1), + padding=(3, 3)) + self.c_layer_5x5 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(5, 5), + stride=(1, 1), + padding=(2, 2)) + self.c_layer_3x3 = nn.Conv2d( + self.channels // self.rf, + self.channels // self.rf, + kernel_size=(3, 3), + stride=(1, 1), + padding=(1, 1)) + + self.bn = nn.BatchNorm2d(self.channels) + self.relu = nn.ReLU() + + def forward(self, x): + x_new = self.conv1x1_reduce_channel(x) + + x_7_c = self.c_layer_7x7(x_new) + x_7_v = self.v_layer_7x1(x_new) + x_7_q = self.q_layer_1x7(x_new) 
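+        # Sum the square (7x7), vertical (7x1) and horizontal (1x7) responses;
+        # the same fusion repeats at kernel sizes 5 and 3 below, before a 1x1
+        # conv restores the channel count and a residual add returns x + relation.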
+ x_7 = x_7_c + x_7_v + x_7_q + + x_5_c = self.c_layer_5x5(x_7) + x_5_v = self.v_layer_5x1(x_7) + x_5_q = self.q_layer_1x5(x_7) + x_5 = x_5_c + x_5_v + x_5_q + + x_3_c = self.c_layer_3x3(x_5) + x_3_v = self.v_layer_3x1(x_5) + x_3_q = self.q_layer_1x3(x_5) + x_3 = x_3_c + x_3_v + x_3_q + + x_relation = self.conv1x1_return_channel(x_3) + + x_relation = self.bn(x_relation) + x_relation = self.relu(x_relation) + + return x + x_relation + + +def build_intraclblock_list(num_block): + IntraCLBlock_list = nn.ModuleList() + for i in range(num_block): + IntraCLBlock_list.append(IntraCLBlock()) + + return IntraCLBlock_list \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/pg_fpn.py b/modules/pytorchocr/modeling/necks/pg_fpn.py new file mode 100644 index 0000000..ec48d3c --- /dev/null +++ b/modules/pytorchocr/modeling/necks/pg_fpn.py @@ -0,0 +1,297 @@ + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2d( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + self._batch_norm = nn.BatchNorm2d(out_channels) + self.act = act + if self.act is not None: + self._act = Activation(act_type=self.act, inplace=True) + + + def forward(self, inputs): + y = self._conv(inputs) + y = self._batch_norm(y) + if self.act is not None: + y = self._act(y) + return y + + +class DeConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=4, + stride=2, + padding=1, + groups=1, + if_act=True, + act=None, + name=None): + super(DeConvBNLayer, self).__init__() + + self.if_act = if_act + self.act = act + self.deconv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False) + self.bn = nn.BatchNorm2d(out_channels) + self.act = act + if self.act is not None: + self._act = Activation(act_type=self.act, inplace=True) + + def forward(self, x): + x = self.deconv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class PGFPN(nn.Module): + def __init__(self, in_channels, **kwargs): + super(PGFPN, self).__init__() + num_inputs = [2048, 2048, 1024, 512, 256] + num_outputs = [256, 256, 192, 192, 128] + self.out_channels = 128 + self.conv_bn_layer_1 = ConvBNLayer( + in_channels=3, + out_channels=32, + kernel_size=3, + stride=1, + act=None, + name='FPN_d1') + self.conv_bn_layer_2 = ConvBNLayer( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=1, + act=None, + name='FPN_d2') + self.conv_bn_layer_3 = ConvBNLayer( + in_channels=256, + out_channels=128, + kernel_size=3, + stride=1, + act=None, + name='FPN_d3') + self.conv_bn_layer_4 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=2, + act=None, + name='FPN_d4') + self.conv_bn_layer_5 = ConvBNLayer( + in_channels=64, + 
out_channels=64, + kernel_size=3, + stride=1, + act='relu', + name='FPN_d5') + self.conv_bn_layer_6 = ConvBNLayer( + in_channels=64, + out_channels=128, + kernel_size=3, + stride=2, + act=None, + name='FPN_d6') + self.conv_bn_layer_7 = ConvBNLayer( + in_channels=128, + out_channels=128, + kernel_size=3, + stride=1, + act='relu', + name='FPN_d7') + self.conv_bn_layer_8 = ConvBNLayer( + in_channels=128, + out_channels=128, + kernel_size=1, + stride=1, + act=None, + name='FPN_d8') + + self.conv_h0 = ConvBNLayer( + in_channels=num_inputs[0], + out_channels=num_outputs[0], + kernel_size=1, + stride=1, + act=None, + name="conv_h{}".format(0)) + self.conv_h1 = ConvBNLayer( + in_channels=num_inputs[1], + out_channels=num_outputs[1], + kernel_size=1, + stride=1, + act=None, + name="conv_h{}".format(1)) + self.conv_h2 = ConvBNLayer( + in_channels=num_inputs[2], + out_channels=num_outputs[2], + kernel_size=1, + stride=1, + act=None, + name="conv_h{}".format(2)) + self.conv_h3 = ConvBNLayer( + in_channels=num_inputs[3], + out_channels=num_outputs[3], + kernel_size=1, + stride=1, + act=None, + name="conv_h{}".format(3)) + self.conv_h4 = ConvBNLayer( + in_channels=num_inputs[4], + out_channels=num_outputs[4], + kernel_size=1, + stride=1, + act=None, + name="conv_h{}".format(4)) + + self.dconv0 = DeConvBNLayer( + in_channels=num_outputs[0], + out_channels=num_outputs[0 + 1], + name="dconv_{}".format(0)) + self.dconv1 = DeConvBNLayer( + in_channels=num_outputs[1], + out_channels=num_outputs[1 + 1], + act=None, + name="dconv_{}".format(1)) + self.dconv2 = DeConvBNLayer( + in_channels=num_outputs[2], + out_channels=num_outputs[2 + 1], + act=None, + name="dconv_{}".format(2)) + self.dconv3 = DeConvBNLayer( + in_channels=num_outputs[3], + out_channels=num_outputs[3 + 1], + act=None, + name="dconv_{}".format(3)) + self.conv_g1 = ConvBNLayer( + in_channels=num_outputs[1], + out_channels=num_outputs[1], + kernel_size=3, + stride=1, + act='relu', + name="conv_g{}".format(1)) + self.conv_g2 = ConvBNLayer( + in_channels=num_outputs[2], + out_channels=num_outputs[2], + kernel_size=3, + stride=1, + act='relu', + name="conv_g{}".format(2)) + self.conv_g3 = ConvBNLayer( + in_channels=num_outputs[3], + out_channels=num_outputs[3], + kernel_size=3, + stride=1, + act='relu', + name="conv_g{}".format(3)) + self.conv_g4 = ConvBNLayer( + in_channels=num_outputs[4], + out_channels=num_outputs[4], + kernel_size=3, + stride=1, + act='relu', + name="conv_g{}".format(4)) + self.convf = ConvBNLayer( + in_channels=num_outputs[4], + out_channels=num_outputs[4], + kernel_size=1, + stride=1, + act=None, + name="conv_f{}".format(4)) + + def forward(self, x): + c0, c1, c2, c3, c4, c5, c6 = x + # FPN_Down_Fusion + f = [c0, c1, c2] + g = [None, None, None] + h = [None, None, None] + h[0] = self.conv_bn_layer_1(f[0]) + h[1] = self.conv_bn_layer_2(f[1]) + h[2] = self.conv_bn_layer_3(f[2]) + + g[0] = self.conv_bn_layer_4(h[0]) + g[1] = torch.add(g[0], h[1]) + g[1] = F.relu(g[1]) + g[1] = self.conv_bn_layer_5(g[1]) + g[1] = self.conv_bn_layer_6(g[1]) + + g[2] = torch.add(g[1], h[2]) + g[2] = F.relu(g[2]) + g[2] = self.conv_bn_layer_7(g[2]) + f_down = self.conv_bn_layer_8(g[2]) + + # FPN UP Fusion + f1 = [c6, c5, c4, c3, c2] + g = [None, None, None, None, None] + h = [None, None, None, None, None] + h[0] = self.conv_h0(f1[0]) + h[1] = self.conv_h1(f1[1]) + h[2] = self.conv_h2(f1[2]) + h[3] = self.conv_h3(f1[3]) + h[4] = self.conv_h4(f1[4]) + + g[0] = self.dconv0(h[0]) + g[1] = torch.add(g[0], h[1]) + g[1] = F.relu(g[1]) + g[1] = 
self.conv_g1(g[1]) + g[1] = self.dconv1(g[1]) + + g[2] = torch.add(g[1], h[2]) + g[2] = F.relu(g[2]) + g[2] = self.conv_g2(g[2]) + g[2] = self.dconv2(g[2]) + + g[3] = torch.add(g[2], h[3]) + g[3] = F.relu(g[3]) + g[3] = self.conv_g3(g[3]) + g[3] = self.dconv3(g[3]) + + g[4] = torch.add(g[3], h[4]) + g[4] = F.relu(g[4]) + g[4] = self.conv_g4(g[4]) + f_up = self.convf(g[4]) + f_common = torch.add(f_down, f_up) + f_common = F.relu(f_common) + return f_common diff --git a/modules/pytorchocr/modeling/necks/rnn.py b/modules/pytorchocr/modeling/necks/rnn.py new file mode 100644 index 0000000..5a353cb --- /dev/null +++ b/modules/pytorchocr/modeling/necks/rnn.py @@ -0,0 +1,205 @@ +import os, sys +import torch +import torch.nn as nn +from pytorchocr.modeling.backbones.rec_svtrnet import Block, ConvBNLayer + +class Im2Seq(nn.Module): + def __init__(self, in_channels, **kwargs): + super().__init__() + self.out_channels = in_channels + + def forward(self, x): + B, C, H, W = x.shape + # assert H == 1 + x = x.squeeze(dim=2) + # x = x.transpose([0, 2, 1]) # paddle (NTC)(batch, width, channels) + x = x.permute(0,2,1) + return x + + +class EncoderWithRNN_(nn.Module): + def __init__(self, in_channels, hidden_size): + super(EncoderWithRNN_, self).__init__() + self.out_channels = hidden_size * 2 + self.rnn1 = nn.LSTM(in_channels, hidden_size, bidirectional=False, batch_first=True, num_layers=2) + self.rnn2 = nn.LSTM(in_channels, hidden_size, bidirectional=False, batch_first=True, num_layers=2) + + def forward(self, x): + self.rnn1.flatten_parameters() + self.rnn2.flatten_parameters() + out1, h1 = self.rnn1(x) + out2, h2 = self.rnn2(torch.flip(x, [1])) + return torch.cat([out1, torch.flip(out2, [1])], 2) + + +class EncoderWithRNN(nn.Module): + def __init__(self, in_channels, hidden_size): + super(EncoderWithRNN, self).__init__() + self.out_channels = hidden_size * 2 + self.lstm = nn.LSTM( + in_channels, hidden_size, num_layers=2, batch_first=True, bidirectional=True) # batch_first:=True + + def forward(self, x): + x, _ = self.lstm(x) + return x + + +class EncoderWithFC(nn.Module): + def __init__(self, in_channels, hidden_size): + super(EncoderWithFC, self).__init__() + self.out_channels = hidden_size + self.fc = nn.Linear( + in_channels, + hidden_size, + bias=True, + ) + + def forward(self, x): + x = self.fc(x) + return x + + +class EncoderWithSVTR(nn.Module): + def __init__( + self, + in_channels, + dims=64, # XS + depth=2, + hidden_dims=120, + use_guide=False, + num_heads=8, + qkv_bias=True, + mlp_ratio=2.0, + drop_rate=0.1, + kernel_size=[3,3], + attn_drop_rate=0.1, + drop_path=0., + qk_scale=None): + super(EncoderWithSVTR, self).__init__() + self.depth = depth + self.use_guide = use_guide + self.conv1 = ConvBNLayer( + in_channels, + in_channels // 8, + kernel_size=kernel_size, + padding=[kernel_size[0] // 2, kernel_size[1] // 2], + act='swish') + self.conv2 = ConvBNLayer( + in_channels // 8, hidden_dims, kernel_size=1, act='swish') + + self.svtr_block = nn.ModuleList([ + Block( + dim=hidden_dims, + num_heads=num_heads, + mixer='Global', + HW=None, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer='swish', + attn_drop=attn_drop_rate, + drop_path=drop_path, + norm_layer='nn.LayerNorm', + epsilon=1e-05, + prenorm=False) for i in range(depth) + ]) + self.norm = nn.LayerNorm(hidden_dims, eps=1e-6) + self.conv3 = ConvBNLayer( + hidden_dims, in_channels, kernel_size=1, act='swish') + # last conv-nxn, the input is concat of input tensor and conv3 output tensor + 
self.conv4 = ConvBNLayer( + 2 * in_channels, in_channels // 8, padding=1, act='swish') + + self.conv1x1 = ConvBNLayer( + in_channels // 8, dims, kernel_size=1, act='swish') + self.out_channels = dims + self.apply(self._init_weights) + + def _init_weights(self, m): + # weight initialization + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.ConvTranspose2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + + def forward(self, x): + # for use guide + if self.use_guide: + z = x.clone() + z.stop_gradient = True + else: + z = x + # for short cut + h = z + # reduce dim + z = self.conv1(z) + z = self.conv2(z) + # SVTR global block + B, C, H, W = z.shape + z = z.flatten(2).permute(0, 2, 1) + + for blk in self.svtr_block: + z = blk(z) + + z = self.norm(z) + # last stage + z = z.reshape([-1, H, W, C]).permute(0, 3, 1, 2) + z = self.conv3(z) + z = torch.cat((h, z), dim=1) + z = self.conv1x1(self.conv4(z)) + + return z + + +class SequenceEncoder(nn.Module): + def __init__(self, in_channels, encoder_type, hidden_size=48, **kwargs): + super(SequenceEncoder, self).__init__() + self.encoder_reshape = Im2Seq(in_channels) + self.out_channels = self.encoder_reshape.out_channels + self.encoder_type = encoder_type + if encoder_type == 'reshape': + self.only_reshape = True + else: + support_encoder_dict = { + 'reshape': Im2Seq, + 'fc': EncoderWithFC, + 'rnn': EncoderWithRNN, + 'svtr': EncoderWithSVTR, + } + assert encoder_type in support_encoder_dict, '{} must in {}'.format( + encoder_type, support_encoder_dict.keys()) + + if encoder_type == "svtr": + self.encoder = support_encoder_dict[encoder_type]( + self.encoder_reshape.out_channels, **kwargs) + else: + self.encoder = support_encoder_dict[encoder_type]( + self.encoder_reshape.out_channels, hidden_size) + self.out_channels = self.encoder.out_channels + self.only_reshape = False + + def forward(self, x): + if self.encoder_type != 'svtr': + x = self.encoder_reshape(x) + if not self.only_reshape: + x = self.encoder(x) + return x + else: + x = self.encoder(x) + x = self.encoder_reshape(x) + return x \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/sast_fpn.py b/modules/pytorchocr/modeling/necks/sast_fpn.py new file mode 100644 index 0000000..118467f --- /dev/null +++ b/modules/pytorchocr/modeling/necks/sast_fpn.py @@ -0,0 +1,305 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import nn +# import paddle.nn.functional as F +# from paddle import ParamAttr + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + if_act=True, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + # self.conv = nn.Conv2D( + # in_channels=in_channels, + # out_channels=out_channels, + # kernel_size=kernel_size, + # stride=stride, + # padding=(kernel_size - 1) // 
2, + # groups=groups, + # weight_attr=ParamAttr(name=name + '_weights'), + # bias_attr=False) + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + + self.bn = nn.BatchNorm2d( + out_channels,) + self.act = act + if act is not None: + self._act = Activation(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class DeConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups=1, + if_act=True, + act=None, + name=None): + super(DeConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + + self.deconv = nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False) + self.bn = nn.BatchNorm2d( + out_channels, + ) + self.act = act + if act is not None: + self._act = Activation(act) + + def forward(self, x): + x = self.deconv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class FPN_Up_Fusion(nn.Module): + def __init__(self, in_channels): + super(FPN_Up_Fusion, self).__init__() + in_channels = in_channels[::-1] + out_channels = [256, 256, 192, 192, 128] + + self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 1, 1, act=None, name='fpn_up_h0') + self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 1, 1, act=None, name='fpn_up_h1') + self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 1, 1, act=None, name='fpn_up_h2') + self.h3_conv = ConvBNLayer(in_channels[3], out_channels[3], 1, 1, act=None, name='fpn_up_h3') + self.h4_conv = ConvBNLayer(in_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_h4') + + self.g0_conv = DeConvBNLayer(out_channels[0], out_channels[1], 4, 2, act=None, name='fpn_up_g0') + + self.g1_conv = nn.Sequential( + ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_up_g1_1'), + DeConvBNLayer(out_channels[1], out_channels[2], 4, 2, act=None, name='fpn_up_g1_2') + ) + self.g2_conv = nn.Sequential( + ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_up_g2_1'), + DeConvBNLayer(out_channels[2], out_channels[3], 4, 2, act=None, name='fpn_up_g2_2') + ) + self.g3_conv = nn.Sequential( + ConvBNLayer(out_channels[3], out_channels[3], 3, 1, act='relu', name='fpn_up_g3_1'), + DeConvBNLayer(out_channels[3], out_channels[4], 4, 2, act=None, name='fpn_up_g3_2') + ) + + self.g4_conv = nn.Sequential( + ConvBNLayer(out_channels[4], out_channels[4], 3, 1, act='relu', name='fpn_up_fusion_1'), + ConvBNLayer(out_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_fusion_2') + ) + + def _add_relu(self, x1, x2): + # x = paddle.add(x=x1, y=x2) + x = torch.add(x1, x2) + x = F.relu(x) + return x + + def forward(self, x): + f = x[2:][::-1] + h0 = self.h0_conv(f[0]) + h1 = self.h1_conv(f[1]) + h2 = self.h2_conv(f[2]) + h3 = self.h3_conv(f[3]) + h4 = self.h4_conv(f[4]) + + g0 = self.g0_conv(h0) + g1 = self._add_relu(g0, h1) + g1 = self.g1_conv(g1) + g2 = self.g2_conv(self._add_relu(g1, h2)) + g3 = self.g3_conv(self._add_relu(g2, h3)) + g4 = self.g4_conv(self._add_relu(g3, h4)) + + return g4 + + +class FPN_Down_Fusion(nn.Module): + def __init__(self, in_channels): + super(FPN_Down_Fusion, self).__init__() + out_channels = [32, 64, 128] + + self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 3, 
1, act=None, name='fpn_down_h0') + self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 3, 1, act=None, name='fpn_down_h1') + self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 3, 1, act=None, name='fpn_down_h2') + + self.g0_conv = ConvBNLayer(out_channels[0], out_channels[1], 3, 2, act=None, name='fpn_down_g0') + + self.g1_conv = nn.Sequential( + ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_down_g1_1'), + ConvBNLayer(out_channels[1], out_channels[2], 3, 2, act=None, name='fpn_down_g1_2') + ) + + self.g2_conv = nn.Sequential( + ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_down_fusion_1'), + ConvBNLayer(out_channels[2], out_channels[2], 1, 1, act=None, name='fpn_down_fusion_2') + ) + + def forward(self, x): + f = x[:3] + h0 = self.h0_conv(f[0]) + h1 = self.h1_conv(f[1]) + h2 = self.h2_conv(f[2]) + g0 = self.g0_conv(h0) + # g1 = paddle.add(x=g0, y=h1) + g1 = torch.add(g0, h1) + g1 = F.relu(g1) + g1 = self.g1_conv(g1) + # g2 = paddle.add(x=g1, y=h2) + g2 = torch.add(g1, h2) + g2 = F.relu(g2) + g2 = self.g2_conv(g2) + return g2 + + +class Cross_Attention(nn.Module): + def __init__(self, in_channels): + super(Cross_Attention, self).__init__() + self.theta_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_theta') + self.phi_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_phi') + self.g_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_g') + + self.fh_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_weight') + self.fh_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_sc') + + self.fv_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_weight') + self.fv_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_sc') + + self.f_attn_conv = ConvBNLayer(in_channels * 2, in_channels, 1, 1, act='relu', name='f_attn') + + def _cal_fweight(self, f, shape): + f_theta, f_phi, f_g = f + # flatten + # f_theta = paddle.transpose(f_theta, [0, 2, 3, 1]) + f_theta = f_theta.permute(0, 2, 3, 1) + # f_theta = paddle.reshape(f_theta, [shape[0] * shape[1], shape[2], 128]) + f_theta = torch.reshape(f_theta, [shape[0] * shape[1], shape[2], 128]) + # f_phi = paddle.transpose(f_phi, [0, 2, 3, 1]) + f_phi = f_phi.permute(0, 2, 3, 1) + # f_phi = paddle.reshape(f_phi, [shape[0] * shape[1], shape[2], 128]) + f_phi = torch.reshape(f_phi, [shape[0] * shape[1], shape[2], 128]) + # f_g = paddle.transpose(f_g, [0, 2, 3, 1]) + f_g = f_g.permute(0, 2, 3, 1) + # f_g = paddle.reshape(f_g, [shape[0] * shape[1], shape[2], 128]) + f_g = torch.reshape(f_g, [shape[0] * shape[1], shape[2], 128]) + # correlation + # f_attn = paddle.matmul(f_theta, paddle.transpose(f_phi, [0, 2, 1])) + f_attn = torch.matmul(f_theta, f_phi.permute(0, 2, 1)) + # scale + f_attn = f_attn / (128 ** 0.5) + f_attn = F.softmax(f_attn, dim=-1) + # weighted sum + # f_weight = paddle.matmul(f_attn, f_g) + f_weight = torch.matmul(f_attn, f_g) + # f_weight = paddle.reshape( + # f_weight, [shape[0], shape[1], shape[2], 128]) + f_weight = torch.reshape( + f_weight, [shape[0], shape[1], shape[2], 128]) + return f_weight + + def forward(self, f_common): + # f_shape = paddle.shape(f_common) + f_shape = f_common.size() + # print('f_shape: ', f_shape) + + f_theta = self.theta_conv(f_common) + f_phi = self.phi_conv(f_common) + f_g = self.g_conv(f_common) + + ######## horizon ######## + fh_weight = self._cal_fweight([f_theta, f_phi, f_g], + [f_shape[0], 
f_shape[2], f_shape[3]]) + # fh_weight = paddle.transpose(fh_weight, [0, 3, 1, 2]) + fh_weight = fh_weight.permute(0, 3, 1, 2) + fh_weight = self.fh_weight_conv(fh_weight) + # short cut + fh_sc = self.fh_sc_conv(f_common) + f_h = F.relu(fh_weight + fh_sc) + + ######## vertical ######## + # fv_theta = paddle.transpose(f_theta, [0, 1, 3, 2]) + fv_theta = f_theta.permute(0, 1, 3, 2) + # fv_phi = paddle.transpose(f_phi, [0, 1, 3, 2]) + fv_phi = f_phi.permute(0, 1, 3, 2) + # fv_g = paddle.transpose(f_g, [0, 1, 3, 2]) + fv_g = f_g.permute(0, 1, 3, 2) + fv_weight = self._cal_fweight([fv_theta, fv_phi, fv_g], + [f_shape[0], f_shape[3], f_shape[2]]) + # fv_weight = paddle.transpose(fv_weight, [0, 3, 2, 1]) + fv_weight = fv_weight.permute(0, 3, 2, 1) + fv_weight = self.fv_weight_conv(fv_weight) + # short cut + fv_sc = self.fv_sc_conv(f_common) + f_v = F.relu(fv_weight + fv_sc) + + ######## merge ######## + # f_attn = paddle.concat([f_h, f_v], axis=1) + f_attn = torch.cat([f_h, f_v], dim=1) + f_attn = self.f_attn_conv(f_attn) + return f_attn + + +class SASTFPN(nn.Module): + def __init__(self, in_channels, with_cab=False, **kwargs): + super(SASTFPN, self).__init__() + self.in_channels = in_channels + self.with_cab = with_cab + self.FPN_Down_Fusion = FPN_Down_Fusion(self.in_channels) + self.FPN_Up_Fusion = FPN_Up_Fusion(self.in_channels) + self.out_channels = 128 + self.cross_attention = Cross_Attention(self.out_channels) + + def forward(self, x): + # down fpn + f_down = self.FPN_Down_Fusion(x) + + # up fpn + f_up = self.FPN_Up_Fusion(x) + + # fusion + # f_common = paddle.add(x=f_down, y=f_up) + f_common = torch.add(f_down, f_up) + f_common = F.relu(f_common) + + if self.with_cab: + # print('enhence f_common with CAB.') + f_common = self.cross_attention(f_common) + + return f_common \ No newline at end of file diff --git a/modules/pytorchocr/modeling/necks/table_fpn.py b/modules/pytorchocr/modeling/necks/table_fpn.py new file mode 100644 index 0000000..1d0ee69 --- /dev/null +++ b/modules/pytorchocr/modeling/necks/table_fpn.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation + + +class TableFPN(nn.Module): + def __init__(self, in_channels, out_channels, **kwargs): + super(TableFPN, self).__init__() + self.out_channels = 512 + + self.in2_conv = nn.Conv2d( + in_channels=in_channels[0], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.in3_conv = nn.Conv2d( + in_channels=in_channels[1], + out_channels=self.out_channels, + kernel_size=1, + stride = 1, + bias=False) + self.in4_conv = nn.Conv2d( + in_channels=in_channels[2], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.in5_conv = nn.Conv2d( + in_channels=in_channels[3], + out_channels=self.out_channels, + kernel_size=1, + bias=False) + self.p5_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p4_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p3_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + self.p2_conv = nn.Conv2d( + in_channels=self.out_channels, + out_channels=self.out_channels // 4, + kernel_size=3, + padding=1, + bias=False) + 
self.fuse_conv = nn.Conv2d( + in_channels=self.out_channels * 4, + out_channels=512, + kernel_size=3, + padding=1, + bias=False) + + def forward(self, x): + c2, c3, c4, c5 = x + + in5 = self.in5_conv(c5) + in4 = self.in4_conv(c4) + in3 = self.in3_conv(c3) + in2 = self.in2_conv(c2) + + out4 = in4 + F.interpolate( + in5, size=in4.shape[2:4], mode="nearest", )#align_mode=1) # 1/16 + out3 = in3 + F.interpolate( + out4, size=in3.shape[2:4], mode="nearest", )#align_mode=1) # 1/8 + out2 = in2 + F.interpolate( + out3, size=in2.shape[2:4], mode="nearest", )#align_mode=1) # 1/4 + + p4 = F.interpolate(out4, size=in5.shape[2:4], mode="nearest", )#align_mode=1) + p3 = F.interpolate(out3, size=in5.shape[2:4], mode="nearest", )#align_mode=1) + p2 = F.interpolate(out2, size=in5.shape[2:4], mode="nearest", )#align_mode=1) + fuse = torch.cat([in5, p4, p3, p2], dim=1) + fuse_conv = self.fuse_conv(fuse) * 0.005 + return [c5 + fuse_conv] \ No newline at end of file diff --git a/modules/pytorchocr/modeling/transforms/__init__.py b/modules/pytorchocr/modeling/transforms/__init__.py new file mode 100644 index 0000000..f038399 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['build_transform'] + + +def build_transform(config): + from .tps import TPS + from .stn import STN_ON + from .tsrn import TSRN + from .tbsrn import TBSRN + + support_dict = ['TPS', 'STN_ON', 'TSRN', 'TBSRN'] + + module_name = config.pop('name') + assert module_name in support_dict, Exception( + 'transform only support {}'.format(support_dict)) + module_class = eval(module_name)(**config) + return module_class \ No newline at end of file diff --git a/modules/pytorchocr/modeling/transforms/stn.py b/modules/pytorchocr/modeling/transforms/stn.py new file mode 100644 index 0000000..dbed2d1 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/stn.py @@ -0,0 +1,121 @@ +""" +This code is refer from: +https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/stn_head.py +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import torch +from torch import nn +from torch.nn import functional as F +import numpy as np + +from .tps_spatial_transformer import TPSSpatialTransformer + + +def conv3x3_block(in_channels, out_channels, stride=1): + n = 3 * 3 * out_channels + w = math.sqrt(2. 
/ n) + conv_layer = nn.Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=True) + block = nn.Sequential(conv_layer, nn.BatchNorm2d(out_channels), nn.ReLU()) + return block + + +class STN(nn.Module): + def __init__(self, in_channels, num_ctrlpoints, activation='none'): + super(STN, self).__init__() + self.in_channels = in_channels + self.num_ctrlpoints = num_ctrlpoints + self.activation = activation + self.stn_convnet = nn.Sequential( + conv3x3_block(in_channels, 32), #32x64 + nn.MaxPool2d( + kernel_size=2, stride=2), + conv3x3_block(32, 64), #16x32 + nn.MaxPool2d( + kernel_size=2, stride=2), + conv3x3_block(64, 128), # 8*16 + nn.MaxPool2d( + kernel_size=2, stride=2), + conv3x3_block(128, 256), # 4*8 + nn.MaxPool2d( + kernel_size=2, stride=2), + conv3x3_block(256, 256), # 2*4, + nn.MaxPool2d( + kernel_size=2, stride=2), + conv3x3_block(256, 256)) # 1*2 + self.stn_fc1 = nn.Sequential( + nn.Linear( + 2 * 256, + 512, + bias=True), + nn.BatchNorm1d(512), + nn.ReLU(inplace=True)) + fc2_bias = self.init_stn() + self.stn_fc2 = nn.Linear( + 512, + num_ctrlpoints * 2, + bias=True) + + def init_stn(self): + margin = 0.01 + sampling_num_per_side = int(self.num_ctrlpoints / 2) + ctrl_pts_x = np.linspace(margin, 1. - margin, sampling_num_per_side) + ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin + ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1 - margin) + ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1) + ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1) + ctrl_points = np.concatenate( + [ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32) + if self.activation == 'none': + pass + elif self.activation == 'sigmoid': + ctrl_points = -np.log(1. / ctrl_points - 1.) + ctrl_points = torch.Tensor(ctrl_points) + # fc2_bias = ctrl_points.view(-1) + fc2_bias = torch.reshape( + ctrl_points, shape=[ctrl_points.shape[0] * ctrl_points.shape[1]]) + return fc2_bias + + def forward(self, x): + x = self.stn_convnet(x) + batch_size, _, h, w = x.shape + # x = x.view(batch_size, -1) + x = torch.reshape(x, shape=(batch_size, -1)) + img_feat = self.stn_fc1(x) + x = self.stn_fc2(0.1 * img_feat) + if self.activation == 'sigmoid': + x = F.sigmoid(x) + # x = x.view(-1, self.num_ctrlpoints, 2) + x = torch.reshape(x, shape=[-1, self.num_ctrlpoints, 2]) + return img_feat, x + + +class STN_ON(nn.Module): + def __init__(self, in_channels, tps_inputsize, tps_outputsize, + num_control_points, tps_margins, stn_activation): + super(STN_ON, self).__init__() + self.tps = TPSSpatialTransformer( + output_image_size=tuple(tps_outputsize), + num_control_points=num_control_points, + margins=tuple(tps_margins)) + self.stn_head = STN(in_channels=in_channels, + num_ctrlpoints=num_control_points, + activation=stn_activation) + self.tps_inputsize = tps_inputsize + self.out_channels = in_channels + + def forward(self, image): + stn_input = torch.nn.functional.interpolate( + image, self.tps_inputsize, mode="bilinear", align_corners=True) + stn_img_feat, ctrl_points = self.stn_head(stn_input) + x, _ = self.tps(image, ctrl_points) + return x diff --git a/modules/pytorchocr/modeling/transforms/tbsrn.py b/modules/pytorchocr/modeling/transforms/tbsrn.py new file mode 100644 index 0000000..8cbaa69 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/tbsrn.py @@ -0,0 +1,267 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is refer from: +https://github.com/FudanVI/FudanOCR/blob/main/scene-text-telescope/model/tbsrn.py +""" + +import math +import warnings +import numpy as np +import torch +from torch import nn +import string + +warnings.filterwarnings("ignore") + +from .tps_spatial_transformer import TPSSpatialTransformer +from .stn import STN as STNHead +from .tsrn import GruBlock, mish, UpsampleBLock +from pytorchocr.modeling.heads.sr_rensnet_transformer import Transformer, LayerNorm, \ + PositionwiseFeedForward, MultiHeadedAttention + + +def positionalencoding2d(d_model, height, width): + """ + :param d_model: dimension of the model + :param height: height of the positions + :param width: width of the positions + :return: d_model*height*width position matrix + """ + if d_model % 4 != 0: + raise ValueError("Cannot use sin/cos positional encoding with " + "odd dimension (got dim={:d})".format(d_model)) + pe = torch.zeros([d_model, height, width]) + # Each dimension use half of d_model + d_model = int(d_model / 2) + div_term = torch.exp(torch.arange(0., d_model, 2) * + -(math.log(10000.0) / d_model)) + pos_w = torch.arange(0., width, dtype=torch.float32).unsqueeze(1) + pos_h = torch.arange(0., height, dtype=torch.float32).unsqueeze(1) + + pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1) + pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1) + pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width) + pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width) + + return pe + + +class FeatureEnhancer(nn.Module): + + def __init__(self): + super(FeatureEnhancer, self).__init__() + + self.multihead = MultiHeadedAttention(h=4, d_model=128, dropout=0.1) + self.mul_layernorm1 = LayerNorm(features=128) + + self.pff = PositionwiseFeedForward(128, 128) + self.mul_layernorm3 = LayerNorm(features=128) + + self.linear = nn.Linear(128, 64) + + def forward(self, conv_feature): + ''' + text : (batch, seq_len, embedding_size) + global_info: (batch, embedding_size, 1, 1) + conv_feature: (batch, channel, H, W) + ''' + batch = conv_feature.shape[0] + if torch.cuda.is_available(): + position2d = positionalencoding2d(64, 16, 64).float().cuda().unsqueeze(0).reshape([1, 64, 1024]) + else: + position2d = positionalencoding2d(64, 16, 64).float().unsqueeze(0).reshape([1, 64, 1024]) + position2d = position2d.repeat(batch, 1, 1) + conv_feature = torch.cat([conv_feature, position2d], 1) # batch, 128(64+64), 32, 128 + result = conv_feature.permute(0, 2, 1).contiguous() + origin_result = result + result = self.mul_layernorm1(origin_result + self.multihead(result, result, result, mask=None)[0]) + origin_result = result + result = self.mul_layernorm3(origin_result + self.pff(result)) + result = self.linear(result) + return result.permute(0, 2, 1).contiguous() + + +def str_filt(str_, voc_type): + 
alpha_dict = { + 'digit': string.digits, + 'lower': string.digits + string.ascii_lowercase, + 'upper': string.digits + string.ascii_letters, + 'all': string.digits + string.ascii_letters + string.punctuation + } + if voc_type == 'lower': + str_ = str_.lower() + for char in str_: + if char not in alpha_dict[voc_type]: + str_ = str_.replace(char, '') + str_ = str_.lower() + return str_ + + +class TBSRN(nn.Module): + def __init__(self, + in_channels=3, + scale_factor=2, + width=128, + height=32, + STN=True, + srb_nums=5, + mask=False, + hidden_units=32, + infer_mode=False): + super(TBSRN, self).__init__() + in_planes = 3 + if mask: + in_planes = 4 + assert math.log(scale_factor, 2) % 1 == 0 + upsample_block_num = int(math.log(scale_factor, 2)) + self.block1 = nn.Sequential( + nn.Conv2d(in_planes, 2 * hidden_units, kernel_size=9, padding=4), + nn.PReLU() + # nn.ReLU() + ) + self.srb_nums = srb_nums + for i in range(srb_nums): + setattr(self, 'block%d' % (i + 2), RecurrentResidualBlock(2 * hidden_units)) + + setattr(self, 'block%d' % (srb_nums + 2), + nn.Sequential( + nn.Conv2d(2 * hidden_units, 2 * hidden_units, kernel_size=3, padding=1), + nn.BatchNorm2d(2 * hidden_units) + )) + + # self.non_local = NonLocalBlock2D(64, 64) + block_ = [UpsampleBLock(2 * hidden_units, 2) for _ in range(upsample_block_num)] + block_.append(nn.Conv2d(2 * hidden_units, in_planes, kernel_size=9, padding=4)) + setattr(self, 'block%d' % (srb_nums + 3), nn.Sequential(*block_)) + self.tps_inputsize = [height // scale_factor, width // scale_factor] + tps_outputsize = [height // scale_factor, width // scale_factor] + num_control_points = 20 + tps_margins = [0.05, 0.05] + self.stn = STN + self.out_channels = in_channels + if self.stn: + self.tps = TPSSpatialTransformer( + output_image_size=tuple(tps_outputsize), + num_control_points=num_control_points, + margins=tuple(tps_margins)) + + self.stn_head = STNHead( + in_channels=in_planes, + num_ctrlpoints=num_control_points, + activation='none') + self.infer_mode = infer_mode + + self.english_alphabet = '-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + self.english_dict = {} + for index in range(len(self.english_alphabet)): + self.english_dict[self.english_alphabet[index]] = index + transformer = Transformer(alphabet='-0123456789abcdefghijklmnopqrstuvwxyz') + self.transformer = transformer + for param in self.transformer.parameters(): + param.trainable = False + + def label_encoder(self, label): + batch = len(label) + + length = [len(i) for i in label] + length_tensor = torch.Tensor(length).type(torch.int64) + + max_length = max(length) + input_tensor = np.zeros((batch, max_length)) + for i in range(batch): + for j in range(length[i] - 1): + input_tensor[i][j + 1] = self.english_dict[label[i][j]] + + text_gt = [] + for i in label: + for j in i: + text_gt.append(self.english_dict[j]) + text_gt = torch.Tensor(text_gt).type(torch.int64) + + input_tensor = torch.Tensor(input_tensor).type(torch.int64) + return length_tensor, input_tensor, text_gt + + def forward(self, x): + output = {} + if self.infer_mode: + output["lr_img"] = x + y = x + else: + output["lr_img"] = x[0] + output["hr_img"] = x[1] + y = x[0] + if self.stn and self.training: + _, ctrl_points_x = self.stn_head(y) + y, _ = self.tps(y, ctrl_points_x) + block = {'1': self.block1(y)} + for i in range(self.srb_nums + 1): + block[str(i + 2)] = getattr(self, + 'block%d' % (i + 2))(block[str(i + 1)]) + + block[str(self.srb_nums + 3)] = getattr(self, 'block%d' % (self.srb_nums + 3)) \ + ((block['1'] + 
block[str(self.srb_nums + 2)])) + + sr_img = torch.tanh(block[str(self.srb_nums + 3)]) + output["sr_img"] = sr_img + + if self.training: + hr_img = x[1] + + # add transformer + label = [str_filt(i, 'lower') + '-' for i in x[2]] + length_tensor, input_tensor, text_gt = self.label_encoder(label) + hr_pred, word_attention_map_gt, hr_correct_list = self.transformer(hr_img, length_tensor, + input_tensor) + sr_pred, word_attention_map_pred, sr_correct_list = self.transformer(sr_img, length_tensor, + input_tensor) + output["hr_img"] = hr_img + output["hr_pred"] = hr_pred + output["text_gt"] = text_gt + output["word_attention_map_gt"] = word_attention_map_gt + output["sr_pred"] = sr_pred + output["word_attention_map_pred"] = word_attention_map_pred + + return output + + +class RecurrentResidualBlock(nn.Module): + def __init__(self, channels): + super(RecurrentResidualBlock, self).__init__() + self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.bn1 = nn.BatchNorm2d(channels) + self.gru1 = GruBlock(channels, channels) + # self.prelu = nn.ReLU() + self.prelu = mish() + self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.bn2 = nn.BatchNorm2d(channels) + self.gru2 = GruBlock(channels, channels) + self.feature_enhancer = FeatureEnhancer() + + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, x): + residual = self.conv1(x) + residual = self.bn1(residual) + residual = self.prelu(residual) + residual = self.conv2(residual) + residual = self.bn2(residual) + + size = residual.shape + residual = residual.reshape([size[0], size[1], -1]) + residual = self.feature_enhancer(residual) + residual = residual.reshape([size[0], size[1], size[2], size[3]]) + return x + residual \ No newline at end of file diff --git a/modules/pytorchocr/modeling/transforms/tps.py b/modules/pytorchocr/modeling/transforms/tps.py new file mode 100644 index 0000000..1fc0cc6 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/tps.py @@ -0,0 +1,301 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os, sys +import torch +import torch.nn as nn +import torch.nn.functional as F +from pytorchocr.modeling.common import Activation +# import paddle +# from paddle import nn, ParamAttr +# from paddle.nn import functional as F +import numpy as np + + +class ConvBNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2, + groups=groups, + bias=False, + ) + bn_name = "bn_" + name + self.bn = nn.BatchNorm2d( + out_channels, ) + self.act = act + if act is not None: + self._act = Activation(act) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.act is not None: + x = self._act(x) + return x + + +class LocalizationNetwork(nn.Module): + def __init__(self, in_channels, num_fiducial, loc_lr, model_name): + super(LocalizationNetwork, self).__init__() + self.F = num_fiducial + F = num_fiducial + if model_name == "large": + num_filters_list = [64, 128, 256, 512] + fc_dim = 256 + else: + num_filters_list = [16, 32, 64, 128] + fc_dim = 64 + + # self.block_list = [] + self.block_list = nn.Sequential() + for fno in range(0, len(num_filters_list)): + num_filters = num_filters_list[fno] + 
name = "loc_conv%d" % fno + # conv = self.add_sublayer( + # name, + # ConvBNLayer( + # in_channels=in_channels, + # out_channels=num_filters, + # kernel_size=3, + # act='relu', + # name=name)) + conv = ConvBNLayer( + in_channels=in_channels, + out_channels=num_filters, + kernel_size=3, + act='relu', + name=name) + # self.block_list.append(conv) + self.block_list.add_module(name, conv) + if fno == len(num_filters_list) - 1: + pool = nn.AdaptiveAvgPool2d(1) + else: + # pool = nn.MaxPool2D(kernel_size=2, stride=2, padding=0) + pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0) + in_channels = num_filters + # self.block_list.append(pool) + self.block_list.add_module('{}_pool'.format(name), pool) + name = "loc_fc1" + stdv = 1.0 / math.sqrt(num_filters_list[-1] * 1.0) + self.fc1 = nn.Linear( + in_channels, + fc_dim, + bias=True, + ) + + + # Init fc2 in LocalizationNetwork + initial_bias = self.get_initial_fiducials() + initial_bias = initial_bias.reshape(-1) + name = "loc_fc2" + self.fc2 = nn.Linear( + fc_dim, + F * 2, + bias=True + ) + self.out_channels = F * 2 + + def forward(self, x): + """ + Estimating parameters of geometric transformation + Args: + image: input + Return: + batch_C_prime: the matrix of the geometric transformation + """ + B = x.shape[0] + i = 0 + for block in self.block_list: + x = block(x) + x = x.squeeze(dim=2).squeeze(dim=2) + x = self.fc1(x) + + x = F.relu(x) + x = self.fc2(x) + x = x.reshape(shape=[-1, self.F, 2]) + return x + + def get_initial_fiducials(self): + """ see RARE paper Fig. 6 (a) """ + F = self.F + ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2)) + ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2)) + ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2)) + ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1) + ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1) + initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0) + return initial_bias + + +class GridGenerator(nn.Module): + def __init__(self, in_channels, num_fiducial): + super(GridGenerator, self).__init__() + self.eps = 1e-6 + self.F = num_fiducial + + name = "ex_fc" + self.fc = nn.Linear( + in_channels, + 6, + bias=True + ) + + def forward(self, batch_C_prime, I_r_size): + """ + Generate the grid for the grid_sampler. 
+ Args: + batch_C_prime: the matrix of the geometric transformation + I_r_size: the shape of the input image + Return: + batch_P_prime: the grid for the grid_sampler + """ + C = self.build_C_paddle() + P = self.build_P_paddle(I_r_size) + + inv_delta_C_tensor = self.build_inv_delta_C_paddle(C).type(torch.float32) + P_hat_tensor = self.build_P_hat_paddle( + C, torch.as_tensor(P)).type(torch.float32) + + inv_delta_C_tensor.stop_gradient = True + P_hat_tensor.stop_gradient = True + + batch_C_ex_part_tensor = self.get_expand_tensor(batch_C_prime) + + batch_C_ex_part_tensor.stop_gradient = True + + batch_C_prime_with_zeros = torch.cat( + [batch_C_prime, batch_C_ex_part_tensor], dim=1) + inv_delta_C_tensor = inv_delta_C_tensor.to(batch_C_prime_with_zeros.device) + batch_T = torch.matmul(inv_delta_C_tensor, batch_C_prime_with_zeros) + P_hat_tensor = P_hat_tensor.to(batch_T.device) + batch_P_prime = torch.matmul(P_hat_tensor, batch_T) + return batch_P_prime + + def build_C_paddle(self): + """ Return coordinates of fiducial points in I_r; C """ + F = self.F + ctrl_pts_x = torch.linspace(-1.0, 1.0, int(F / 2), dtype=torch.float64) + ctrl_pts_y_top = -1 * torch.ones([int(F / 2)], dtype=torch.float64) + ctrl_pts_y_bottom = torch.ones([int(F / 2)], dtype=torch.float64) + ctrl_pts_top = torch.stack([ctrl_pts_x, ctrl_pts_y_top], dim=1) + ctrl_pts_bottom = torch.stack([ctrl_pts_x, ctrl_pts_y_bottom], dim=1) + C = torch.cat([ctrl_pts_top, ctrl_pts_bottom], dim=0) + return C # F x 2 + + def build_P_paddle(self, I_r_size): + I_r_height, I_r_width = I_r_size + I_r_grid_x = (torch.arange( + -I_r_width, I_r_width, 2, dtype=torch.float64) + 1.0 + ) / torch.as_tensor(np.array([I_r_width]).astype(np.float64)) + + I_r_grid_y = (torch.arange( + -I_r_height, I_r_height, 2, dtype=torch.float64) + 1.0 + ) / torch.as_tensor(np.array([I_r_height]).astype(np.float64)) + + # P: self.I_r_width x self.I_r_height x 2 + P = torch.stack(torch.meshgrid([I_r_grid_x, I_r_grid_y]), dim=2) + # P = paddle.transpose(P, perm=[1, 0, 2]) + P = P.permute(1, 0, 2) + # n (= self.I_r_width x self.I_r_height) x 2 + return P.reshape([-1, 2]) + + def build_inv_delta_C_paddle(self, C): + """ Return inv_delta_C which is needed to calculate T """ + F = self.F + hat_C = torch.zeros((F, F), dtype=torch.float64) # F x F + for i in range(0, F): + for j in range(i, F): + if i == j: + hat_C[i, j] = 1 + else: + r = torch.norm(C[i] - C[j]) + hat_C[i, j] = r + hat_C[j, i] = r + hat_C = (hat_C**2) * torch.log(hat_C) + delta_C = torch.cat( # F+3 x F+3 + [ + torch.cat( + [torch.ones( + (F, 1), dtype=torch.float64), C, hat_C], dim=1), # F x F+3 + torch.cat( + [ + torch.zeros( + (2, 3), dtype=torch.float64), C.permute(1,0) + ], + dim=1), # 2 x F+3 + torch.cat( + [ + torch.zeros( + (1, 3), dtype=torch.float64), torch.ones( + (1, F), dtype=torch.float64) + ], + dim=1) # 1 x F+3 + ], + dim=0) + inv_delta_C = torch.inverse(delta_C) + return inv_delta_C # F+3 x F+3 + + def build_P_hat_paddle(self, C, P): + F = self.F + eps = self.eps + n = P.shape[0] # n (= self.I_r_width x self.I_r_height) + # P_tile: n x 2 -> n x 1 x 2 -> n x F x 2 + # P_tile = paddle.tile(paddle.unsqueeze(P, axis=1), (1, F, 1)) + P_tile = torch.unsqueeze(P, dim=1).repeat(1, F, 1) + C_tile = torch.unsqueeze(C, dim=0) # 1 x F x 2 + P_diff = P_tile - C_tile # n x F x 2 + # rbf_norm: n x F + rbf_norm = torch.norm(P_diff, p=2, dim=2, keepdim=False) + + # rbf: n x F + # rbf = torch.mul( + # torch.square(rbf_norm), torch.log(rbf_norm + eps)) + rbf = torch.mul( + rbf_norm**2, torch.log(rbf_norm + eps)) + 
P_hat = torch.cat( + [torch.ones( + (n, 1), dtype=torch.float64), P, rbf], dim=1) + return P_hat # n x F+3 + + def get_expand_tensor(self, batch_C_prime): + B, H, C = batch_C_prime.shape + batch_C_prime = batch_C_prime.reshape([B, H * C]) + batch_C_ex_part_tensor = self.fc(batch_C_prime) + batch_C_ex_part_tensor = batch_C_ex_part_tensor.reshape([-1, 3, 2]) + return batch_C_ex_part_tensor + + +class TPS(nn.Module): + def __init__(self, in_channels, num_fiducial, loc_lr, model_name): + super(TPS, self).__init__() + self.loc_net = LocalizationNetwork(in_channels, num_fiducial, loc_lr, + model_name) + self.grid_generator = GridGenerator(self.loc_net.out_channels, + num_fiducial) + self.out_channels = in_channels + + def forward(self, image): + image.stop_gradient = False + batch_C_prime = self.loc_net(image) + batch_P_prime = self.grid_generator(batch_C_prime, image.shape[2:]) + batch_P_prime = batch_P_prime.reshape( + [-1, image.shape[2], image.shape[3], 2]) + if torch.__version__ < '1.3.0': + batch_I_r = F.grid_sample(image, grid=batch_P_prime) + else: + batch_I_r = F.grid_sample(image, grid=batch_P_prime, align_corners=True) + return batch_I_r diff --git a/modules/pytorchocr/modeling/transforms/tps_spatial_transformer.py b/modules/pytorchocr/modeling/transforms/tps_spatial_transformer.py new file mode 100644 index 0000000..2241307 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/tps_spatial_transformer.py @@ -0,0 +1,136 @@ +""" +This code is refer from: +https://github.com/ayumiymk/aster.pytorch/blob/master/lib/models/tps_spatial_transformer.py +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import torch +from torch import nn +from torch.nn import functional as F +import numpy as np +import itertools + + +def grid_sample(input, grid, canvas=None): + input.stop_gradient = False + output = F.grid_sample(input, grid, align_corners=True) if torch.__version__ >= '1.3.0' else F.grid_sample(input, grid) + if canvas is None: + return output + else: + # input_mask = paddle.ones(shape=input.shape) + input_mask = input.data.new(input.size()).fill_(1) + output_mask = F.grid_sample(input_mask, grid) + padded_output = output * output_mask + canvas * (1 - output_mask) + return padded_output + + +# phi(x1, x2) = r^2 * log(r), where r = ||x1 - x2||_2 +def compute_partial_repr(input_points, control_points): + N = input_points.shape[0] + M = control_points.shape[0] + # pairwise_diff = input_points.view(N, 1, 2) - control_points.view(1, M, 2) + pairwise_diff = torch.reshape( + input_points, shape=[N, 1, 2]) - torch.reshape( + control_points, shape=[1, M, 2]) + # original implementation, very slow + # pairwise_dist = torch.sum(pairwise_diff ** 2, dim = 2) # square of distance + pairwise_diff_square = pairwise_diff * pairwise_diff + pairwise_dist = pairwise_diff_square[:, :, 0] + pairwise_diff_square[:, :, 1] + repr_matrix = 0.5 * pairwise_dist * torch.log(pairwise_dist) + # fix numerical error for 0 * log(0), substitute all nan with 0 + # mask = np.array(repr_matrix != repr_matrix) + # repr_matrix[mask] = 0 + mask = repr_matrix != repr_matrix + repr_matrix.masked_fill_(mask, 0) + return repr_matrix + + +# output_ctrl_pts are specified, according to our task. 
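+# they form two horizontal rows of evenly spaced points, inset from the
+# image borders by the given margins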
+def build_output_control_points(num_control_points, margins): + margin_x, margin_y = margins + num_ctrl_pts_per_side = num_control_points // 2 + ctrl_pts_x = np.linspace(margin_x, 1.0 - margin_x, num_ctrl_pts_per_side) + ctrl_pts_y_top = np.ones(num_ctrl_pts_per_side) * margin_y + ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y) + ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1) + ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1) + output_ctrl_pts_arr = np.concatenate( + [ctrl_pts_top, ctrl_pts_bottom], axis=0) + output_ctrl_pts = torch.Tensor(output_ctrl_pts_arr) + return output_ctrl_pts + + +class TPSSpatialTransformer(nn.Module): + def __init__(self, + output_image_size=None, + num_control_points=None, + margins=None): + super(TPSSpatialTransformer, self).__init__() + self.output_image_size = output_image_size + self.num_control_points = num_control_points + self.margins = margins + + self.target_height, self.target_width = output_image_size + target_control_points = build_output_control_points(num_control_points, + margins) + N = num_control_points + + # create padded kernel matrix + forward_kernel = torch.zeros(N + 3, N + 3) + target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points) + forward_kernel[:N, :N].copy_(target_control_partial_repr) + forward_kernel[:N, -3].fill_(1) + forward_kernel[-3, :N].fill_(1) + forward_kernel[:N, -2:].copy_(target_control_points) + forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1)) + # compute inverse matrix + inverse_kernel = torch.inverse(forward_kernel) + + # create target cordinate matrix + HW = self.target_height * self.target_width + target_coordinate = list( + itertools.product( + range(self.target_height), range(self.target_width))) + target_coordinate = torch.Tensor(target_coordinate) # HW x 2 + Y, X = target_coordinate.split(1, dim = 1) + Y = Y / (self.target_height - 1) + X = X / (self.target_width - 1) + target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y) + target_coordinate_partial_repr = compute_partial_repr( + target_coordinate, target_control_points) + target_coordinate_repr = torch.cat( + [ + target_coordinate_partial_repr, + torch.ones(HW, 1), + target_coordinate + ], + dim=1) + + # register precomputed matrices + self.inverse_kernel = inverse_kernel + self.padding_matrix = torch.zeros(3, 2) + self.target_coordinate_repr = target_coordinate_repr + self.target_control_points = target_control_points + + def forward(self, input, source_control_points): + assert source_control_points.ndimension() == 3 + assert source_control_points.shape[1] == self.num_control_points + assert source_control_points.shape[2] == 2 + batch_size = source_control_points.size(0) + + Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1) + mapping_matrix = torch.matmul(self.inverse_kernel, Y) + source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix) + + # grid = source_coordinate.view(-1, self.target_height, self.target_width, 2) + grid = torch.reshape( + source_coordinate, + shape=[-1, self.target_height, self.target_width, 2]) + grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1]. 
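+        # grid is laid out as (batch, target_height, target_width, 2),
+        # the shape F.grid_sample expects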
+ # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1] + grid = 2.0 * grid - 1.0 + output_maps = grid_sample(input, grid, canvas=None) + return output_maps, source_coordinate diff --git a/modules/pytorchocr/modeling/transforms/tsrn.py b/modules/pytorchocr/modeling/transforms/tsrn.py new file mode 100644 index 0000000..f7ee9c3 --- /dev/null +++ b/modules/pytorchocr/modeling/transforms/tsrn.py @@ -0,0 +1,220 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This code is refer from: +https://github.com/FudanVI/FudanOCR/blob/main/text-gestalt/model/tsrn.py +""" + +import math +import torch +import torch.nn.functional as F +from torch import nn +from collections import OrderedDict +import sys +import numpy as np +import warnings +import math, copy +import cv2 + +warnings.filterwarnings("ignore") + +from .tps_spatial_transformer import TPSSpatialTransformer +from .stn import STN as STN_model +from pytorchocr.modeling.heads.sr_rensnet_transformer import Transformer + + +class TSRN(nn.Module): + def __init__(self, + in_channels, + scale_factor=2, + width=128, + height=32, + STN=False, + srb_nums=5, + mask=False, + hidden_units=32, + infer_mode=False, + **kwargs): + super(TSRN, self).__init__() + in_planes = 3 + if mask: + in_planes = 4 + assert math.log(scale_factor, 2) % 1 == 0 + upsample_block_num = int(math.log(scale_factor, 2)) + self.block1 = nn.Sequential( + nn.Conv2d( + in_planes, 2 * hidden_units, kernel_size=9, padding=4), + nn.PReLU()) + self.srb_nums = srb_nums + for i in range(srb_nums): + setattr(self, 'block%d' % (i + 2), + RecurrentResidualBlock(2 * hidden_units)) + + setattr( + self, + 'block%d' % (srb_nums + 2), + nn.Sequential( + nn.Conv2d( + 2 * hidden_units, + 2 * hidden_units, + kernel_size=3, + padding=1), + nn.BatchNorm2d(2 * hidden_units))) + + block_ = [ + UpsampleBLock(2 * hidden_units, 2) + for _ in range(upsample_block_num) + ] + block_.append( + nn.Conv2d(2 * hidden_units, in_planes, kernel_size=9, padding=4) + ) + setattr(self, 'block%d' % (srb_nums + 3), nn.Sequential(*block_)) + self.tps_inputsize = [height // scale_factor, width // scale_factor] + tps_outputsize = [height // scale_factor, width // scale_factor] + num_control_points = 20 + tps_margins = [0.05, 0.05] + self.stn = STN + if self.stn: + self.tps = TPSSpatialTransformer( + output_image_size=tuple(tps_outputsize), + num_control_points=num_control_points, + margins=tuple(tps_margins)) + + self.stn_head = STN_model( + in_channels=in_planes, + num_ctrlpoints=num_control_points, + activation='none') + self.out_channels = in_channels + + self.r34_transformer = Transformer() + for param in self.r34_transformer.parameters(): + param.trainable = False + self.infer_mode = infer_mode + + def forward(self, x): + output = {} + if self.infer_mode: + output["lr_img"] = x + y = x + else: + output["lr_img"] = x[0] + output["hr_img"] = x[1] + y = x[0] + if self.stn and self.training: + _, ctrl_points_x = self.stn_head(y) + y, _ = 
self.tps(y, ctrl_points_x) + block = {'1': self.block1(y)} + for i in range(self.srb_nums + 1): + block[str(i + 2)] = getattr(self, + 'block%d' % (i + 2))(block[str(i + 1)]) + + block[str(self.srb_nums + 3)] = getattr(self, 'block%d' % (self.srb_nums + 3)) \ + ((block['1'] + block[str(self.srb_nums + 2)])) + + sr_img = torch.tanh(block[str(self.srb_nums + 3)]) + + output["sr_img"] = sr_img + + if self.training: + hr_img = x[1] + length = x[2] + input_tensor = x[3] + + # add transformer + sr_pred, word_attention_map_pred, _ = self.r34_transformer( + sr_img, length, input_tensor) + + hr_pred, word_attention_map_gt, _ = self.r34_transformer( + hr_img, length, input_tensor) + + output["hr_img"] = hr_img + output["hr_pred"] = hr_pred + output["word_attention_map_gt"] = word_attention_map_gt + output["sr_pred"] = sr_pred + output["word_attention_map_pred"] = word_attention_map_pred + + return output + + +class RecurrentResidualBlock(nn.Module): + def __init__(self, channels): + super(RecurrentResidualBlock, self).__init__() + self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.bn1 = nn.BatchNorm2d(channels) + self.gru1 = GruBlock(channels, channels) + self.prelu = mish() + self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) + self.bn2 = nn.BatchNorm2d(channels) + self.gru2 = GruBlock(channels, channels) + + def forward(self, x): + residual = self.conv1(x) + residual = self.bn1(residual) + residual = self.prelu(residual) + residual = self.conv2(residual) + residual = self.bn2(residual) + residual = self.gru1(residual.permute(0, 1, 3, 2).contiguous()).permute(0, 1, 3, 2).contiguous() + + return self.gru2(x + residual).contiguous() + + +class UpsampleBLock(nn.Module): + def __init__(self, in_channels, up_scale): + super(UpsampleBLock, self).__init__() + self.conv = nn.Conv2d( + in_channels, in_channels * up_scale**2, kernel_size=3, padding=1) + + self.pixel_shuffle = nn.PixelShuffle(up_scale) + self.prelu = mish() + + def forward(self, x): + x = self.conv(x) + x = self.pixel_shuffle(x) + x = self.prelu(x) + return x + + +class mish(nn.Module): + def __init__(self, ): + super(mish, self).__init__() + self.activated = True + + def forward(self, x): + if self.activated: + x = x * (torch.tanh(F.softplus(x))) + return x + + +class GruBlock(nn.Module): + def __init__(self, in_channels, out_channels): + super(GruBlock, self).__init__() + assert out_channels % 2 == 0 + self.conv1 = nn.Conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + self.gru = nn.GRU(out_channels, + out_channels // 2, + bidirectional=True, + batch_first=True, + ) + + def forward(self, x): + # x: b, c, w, h + x = self.conv1(x) + x = x.permute(0, 2, 3, 1).contiguous() # b, w, h, c + batch_size, w, h, c = x.size() + x = x.view(batch_size * w, h, c) # b*w, h, c + x, _ = self.gru(x) + x = x.view(batch_size, w, h, c) + x = x.permute(0, 3, 1, 2).contiguous() + return x diff --git a/modules/pytorchocr/postprocess/__init__.py b/modules/pytorchocr/postprocess/__init__.py new file mode 100644 index 0000000..fc20314 --- /dev/null +++ b/modules/pytorchocr/postprocess/__init__.py @@ -0,0 +1,41 @@ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import copy + +__all__ = ['build_post_process'] + + +def build_post_process(config, global_config=None): + from .db_postprocess import DBPostProcess + from .east_postprocess import EASTPostProcess + from .sast_postprocess import SASTPostProcess + 
from .fce_postprocess import FCEPostProcess + from .rec_postprocess import CTCLabelDecode, AttnLabelDecode, SRNLabelDecode, TableLabelDecode, \ + NRTRLabelDecode, SARLabelDecode, ViTSTRLabelDecode, RFLLabelDecode + from .cls_postprocess import ClsPostProcess + from .pg_postprocess import PGPostProcess + from .rec_postprocess import CANLabelDecode + + support_dict = [ + 'DBPostProcess', 'EASTPostProcess', 'SASTPostProcess', 'CTCLabelDecode', + 'AttnLabelDecode', 'ClsPostProcess', 'SRNLabelDecode', 'PGPostProcess', + 'TableLabelDecode', 'NRTRLabelDecode', 'SARLabelDecode', 'FCEPostProcess', + 'ViTSTRLabelDecode','CANLabelDecode', 'RFLLabelDecode' + ] + + if config['name'] == 'PSEPostProcess': + from .pse_postprocess import PSEPostProcess + support_dict.append('PSEPostProcess') + + config = copy.deepcopy(config) + module_name = config.pop('name') + if global_config is not None: + config.update(global_config) + assert module_name in support_dict, Exception( + 'post process only support {}, but got {}'.format(support_dict, module_name)) + module_class = eval(module_name)(**config) + return module_class \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/cls_postprocess.py b/modules/pytorchocr/postprocess/cls_postprocess.py new file mode 100644 index 0000000..c9c6aff --- /dev/null +++ b/modules/pytorchocr/postprocess/cls_postprocess.py @@ -0,0 +1,20 @@ +import torch + + +class ClsPostProcess(object): + """ Convert between text-label and text-index """ + + def __init__(self, label_list, **kwargs): + super(ClsPostProcess, self).__init__() + self.label_list = label_list + + def __call__(self, preds, label=None, *args, **kwargs): + if isinstance(preds, torch.Tensor): + preds = preds.cpu().numpy() + pred_idxs = preds.argmax(axis=1) + decode_out = [(self.label_list[idx], preds[i, idx]) + for i, idx in enumerate(pred_idxs)] + if label is None: + return decode_out + label = [(self.label_list[idx], 1.0) for idx in label] + return decode_out, label \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/db_postprocess.py b/modules/pytorchocr/postprocess/db_postprocess.py new file mode 100644 index 0000000..5305109 --- /dev/null +++ b/modules/pytorchocr/postprocess/db_postprocess.py @@ -0,0 +1,179 @@ +""" +This code is refered from: +https://github.com/WenmuZhou/DBNet.pytorch/blob/master/post_processing/seg_detector_representer.py +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import cv2 +import torch +from shapely.geometry import Polygon +import pyclipper + + +class DBPostProcess(object): + """ + The post process for Differentiable Binarization (DB). 
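+
+    Pipeline: threshold the probability map into a bitmap, trace contours,
+    turn each contour into a scored min-area box, unclip the surviving
+    boxes back to full text extent, and rescale them to the source image.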
+ """ + + def __init__(self, + thresh=0.3, + box_thresh=0.7, + max_candidates=1000, + unclip_ratio=2.0, + use_dilation=False, + score_mode="fast", + **kwargs): + self.thresh = thresh + self.box_thresh = box_thresh + self.max_candidates = max_candidates + self.unclip_ratio = unclip_ratio + self.min_size = 3 + self.score_mode = score_mode + assert score_mode in [ + "slow", "fast" + ], "Score mode must be in [slow, fast] but got: {}".format(score_mode) + + self.dilation_kernel = None if not use_dilation else np.array( + [[1, 1], [1, 1]]) + + def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height): + ''' + _bitmap: single map with shape (1, H, W), + whose values are binarized as {0, 1} + ''' + + bitmap = _bitmap + height, width = bitmap.shape + + outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, + cv2.CHAIN_APPROX_SIMPLE) + if len(outs) == 3: + img, contours, _ = outs[0], outs[1], outs[2] + elif len(outs) == 2: + contours, _ = outs[0], outs[1] + + num_contours = min(len(contours), self.max_candidates) + + boxes = [] + scores = [] + for index in range(num_contours): + contour = contours[index] + points, sside = self.get_mini_boxes(contour) + if sside < self.min_size: + continue + points = np.array(points) + if self.score_mode == "fast": + score = self.box_score_fast(pred, points.reshape(-1, 2)) + else: + score = self.box_score_slow(pred, contour) + if self.box_thresh > score: + continue + + box = self.unclip(points).reshape(-1, 1, 2) + box, sside = self.get_mini_boxes(box) + if sside < self.min_size + 2: + continue + box = np.array(box) + + box[:, 0] = np.clip( + np.round(box[:, 0] / width * dest_width), 0, dest_width) + box[:, 1] = np.clip( + np.round(box[:, 1] / height * dest_height), 0, dest_height) + boxes.append(box.astype(np.int16)) + scores.append(score) + return np.array(boxes, dtype=np.int16), scores + + def unclip(self, box): + unclip_ratio = self.unclip_ratio + poly = Polygon(box) + distance = poly.area * unclip_ratio / poly.length + offset = pyclipper.PyclipperOffset() + offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) + expanded = np.array(offset.Execute(distance)) + return expanded + + def get_mini_boxes(self, contour): + bounding_box = cv2.minAreaRect(contour) + points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0]) + + index_1, index_2, index_3, index_4 = 0, 1, 2, 3 + if points[1][1] > points[0][1]: + index_1 = 0 + index_4 = 1 + else: + index_1 = 1 + index_4 = 0 + if points[3][1] > points[2][1]: + index_2 = 2 + index_3 = 3 + else: + index_2 = 3 + index_3 = 2 + + box = [ + points[index_1], points[index_2], points[index_3], points[index_4] + ] + return box, min(bounding_box[1]) + + def box_score_fast(self, bitmap, _box): + ''' + box_score_fast: use bbox mean score as the mean score + ''' + h, w = bitmap.shape[:2] + box = _box.copy() + xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1) + xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1) + ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1) + ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1) + + mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) + box[:, 0] = box[:, 0] - xmin + box[:, 1] = box[:, 1] - ymin + cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1) + return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0] + + def box_score_slow(self, bitmap, contour): + ''' + box_score_slow: use polyon mean score as the mean score + ''' + h, w = bitmap.shape[:2] + 
contour = contour.copy() + contour = np.reshape(contour, (-1, 2)) + + xmin = np.clip(np.min(contour[:, 0]), 0, w - 1) + xmax = np.clip(np.max(contour[:, 0]), 0, w - 1) + ymin = np.clip(np.min(contour[:, 1]), 0, h - 1) + ymax = np.clip(np.max(contour[:, 1]), 0, h - 1) + + mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) + + contour[:, 0] = contour[:, 0] - xmin + contour[:, 1] = contour[:, 1] - ymin + + cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1) + return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0] + + def __call__(self, outs_dict, shape_list): + pred = outs_dict['maps'] + if isinstance(pred, torch.Tensor): + pred = pred.cpu().numpy() + pred = pred[:, 0, :, :] + segmentation = pred > self.thresh + + boxes_batch = [] + for batch_index in range(pred.shape[0]): + src_h, src_w, ratio_h, ratio_w = shape_list[batch_index] + if self.dilation_kernel is not None: + mask = cv2.dilate( + np.array(segmentation[batch_index]).astype(np.uint8), + self.dilation_kernel) + else: + mask = segmentation[batch_index] + boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask, + src_w, src_h) + + boxes_batch.append({'points': boxes}) + return boxes_batch \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/east_postprocess.py b/modules/pytorchocr/postprocess/east_postprocess.py new file mode 100644 index 0000000..10bb916 --- /dev/null +++ b/modules/pytorchocr/postprocess/east_postprocess.py @@ -0,0 +1,144 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from .locality_aware_nms import nms_locality +import cv2 +# import paddle +import torch + +import os +import sys + + +class EASTPostProcess(object): + """ + The post process for EAST. + """ + + def __init__(self, + score_thresh=0.8, + cover_thresh=0.1, + nms_thresh=0.2, + **kwargs): + + self.score_thresh = score_thresh + self.cover_thresh = cover_thresh + self.nms_thresh = nms_thresh + + # c++ la-nms is faster, but only support python 3.5 + self.is_python35 = False + if sys.version_info.major == 3 and sys.version_info.minor == 5: + self.is_python35 = True + + def restore_rectangle_quad(self, origin, geometry): + """ + Restore rectangle from quadrangle. 
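+
+        The geo map holds, for every positive pixel, the offsets from that
+        pixel to the four corners of its quad, so tiling the pixel
+        coordinates four times and subtracting the offsets recovers an
+        (n, 4, 2) corner array.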
+ """ + # quad + origin_concat = np.concatenate( + (origin, origin, origin, origin), axis=1) # (n, 8) + pred_quads = origin_concat - geometry + pred_quads = pred_quads.reshape((-1, 4, 2)) # (n, 4, 2) + return pred_quads + + def detect(self, + score_map, + geo_map, + score_thresh=0.8, + cover_thresh=0.1, + nms_thresh=0.2): + """ + restore text boxes from score map and geo map + """ + score_map = score_map[0] + geo_map = np.swapaxes(geo_map, 1, 0) + geo_map = np.swapaxes(geo_map, 1, 2) + # filter the score map + xy_text = np.argwhere(score_map > score_thresh) + if len(xy_text) == 0: + return [] + # sort the text boxes via the y axis + xy_text = xy_text[np.argsort(xy_text[:, 0])] + # restore quad proposals + text_box_restored = self.restore_rectangle_quad( + xy_text[:, ::-1] * 4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) + boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32) + boxes[:, :8] = text_box_restored.reshape((-1, 8)) + boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]] + if self.is_python35: + import lanms + boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh) + else: + boxes = nms_locality(boxes.astype(np.float64), nms_thresh) + if boxes.shape[0] == 0: + return [] + # Here we filter some low score boxes by the average score map, + # this is different from the orginal paper. + for i, box in enumerate(boxes): + mask = np.zeros_like(score_map, dtype=np.uint8) + cv2.fillPoly(mask, box[:8].reshape( + (-1, 4, 2)).astype(np.int32) // 4, 1) + boxes[i, 8] = cv2.mean(score_map, mask)[0] + boxes = boxes[boxes[:, 8] > cover_thresh] + return boxes + + def sort_poly(self, p): + """ + Sort polygons. + """ + min_axis = np.argmin(np.sum(p, axis=1)) + p = p[[min_axis, (min_axis + 1) % 4, \ + (min_axis + 2) % 4, (min_axis + 3) % 4]] + if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]): + return p + else: + return p[[0, 3, 2, 1]] + + def __call__(self, outs_dict, shape_list): + score_list = outs_dict['f_score'] + geo_list = outs_dict['f_geo'] + if isinstance(score_list, torch.Tensor): + score_list = score_list.cpu().numpy() + geo_list = geo_list.cpu().numpy() + img_num = len(shape_list) + dt_boxes_list = [] + for ino in range(img_num): + score = score_list[ino] + geo = geo_list[ino] + boxes = self.detect( + score_map=score, + geo_map=geo, + score_thresh=self.score_thresh, + cover_thresh=self.cover_thresh, + nms_thresh=self.nms_thresh) + boxes_norm = [] + if len(boxes) > 0: + h, w = score.shape[1:] + src_h, src_w, ratio_h, ratio_w = shape_list[ino] + boxes = boxes[:, :8].reshape((-1, 4, 2)) + boxes[:, :, 0] /= ratio_w + boxes[:, :, 1] /= ratio_h + for i_box, box in enumerate(boxes): + box = self.sort_poly(box.astype(np.int32)) + if np.linalg.norm(box[0] - box[1]) < 5 \ + or np.linalg.norm(box[3] - box[0]) < 5: + continue + boxes_norm.append(box) + dt_boxes_list.append({'points': np.array(boxes_norm)}) + return dt_boxes_list \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/fce_postprocess.py b/modules/pytorchocr/postprocess/fce_postprocess.py new file mode 100644 index 0000000..ae4633f --- /dev/null +++ b/modules/pytorchocr/postprocess/fce_postprocess.py @@ -0,0 +1,228 @@ +""" +This code is refer from: +https://github.com/open-mmlab/mmocr/blob/v0.3.0/mmocr/models/textdet/postprocess/wrapper.py +""" + +import cv2 +import torch +import numpy as np +from numpy.fft import ifft +from pytorchocr.utils.poly_nms import poly_nms, valid_boundary + + +def fill_hole(input_mask): + h, w = input_mask.shape + canvas = np.zeros((h + 2, w + 2), np.uint8) + canvas[1:h + 1, 1:w + 1] 
= input_mask.copy() + + mask = np.zeros((h + 4, w + 4), np.uint8) + + cv2.floodFill(canvas, mask, (0, 0), 1) + canvas = canvas[1:h + 1, 1:w + 1].astype(np.bool) + + return ~canvas | input_mask + + +def fourier2poly(fourier_coeff, num_reconstr_points=50): + """ Inverse Fourier transform + Args: + fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1), + with n and k being candidates number and Fourier degree + respectively. + num_reconstr_points (int): Number of reconstructed polygon points. + Returns: + Polygons (ndarray): The reconstructed polygons shaped (n, n') + """ + + a = np.zeros((len(fourier_coeff), num_reconstr_points), dtype='complex') + k = (len(fourier_coeff[0]) - 1) // 2 + + a[:, 0:k + 1] = fourier_coeff[:, k:] + a[:, -k:] = fourier_coeff[:, :k] + + poly_complex = ifft(a) * num_reconstr_points + polygon = np.zeros((len(fourier_coeff), num_reconstr_points, 2)) + polygon[:, :, 0] = poly_complex.real + polygon[:, :, 1] = poly_complex.imag + return polygon.astype('int32').reshape((len(fourier_coeff), -1)) + + +class FCEPostProcess(object): + """ + The post process for FCENet. + """ + + def __init__(self, + scales, + fourier_degree=5, + num_reconstr_points=50, + decoding_type='fcenet', + score_thr=0.3, + nms_thr=0.1, + alpha=1.0, + beta=1.0, + box_type='poly', + **kwargs): + + self.scales = scales + self.fourier_degree = fourier_degree + self.num_reconstr_points = num_reconstr_points + self.decoding_type = decoding_type + self.score_thr = score_thr + self.nms_thr = nms_thr + self.alpha = alpha + self.beta = beta + self.box_type = box_type + + def __call__(self, preds, shape_list): + score_maps = [] + for key, value in preds.items(): + if isinstance(value, torch.Tensor): + value = value.numpy() + cls_res = value[:, :4, :, :] + reg_res = value[:, 4:, :, :] + score_maps.append([cls_res, reg_res]) + + return self.get_boundary(score_maps, shape_list) + + def resize_boundary(self, boundaries, scale_factor): + """Rescale boundaries via scale_factor. + + Args: + boundaries (list[list[float]]): The boundary list. Each boundary + with size 2k+1 with k>=4. + scale_factor(ndarray): The scale factor of size (4,). + + Returns: + boundaries (list[list[float]]): The scaled boundaries. 
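+            scores (list[float]): The confidence score stripped from the
+                tail of each input boundary.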
+ """ + boxes = [] + scores = [] + for b in boundaries: + sz = len(b) + valid_boundary(b, True) + scores.append(b[-1]) + b = (np.array(b[:sz - 1]) * + (np.tile(scale_factor[:2], int( + (sz - 1) / 2)).reshape(1, sz - 1))).flatten().tolist() + boxes.append(np.array(b).reshape([-1, 2])) + + return np.array(boxes, dtype=np.float32), scores + + def get_boundary(self, score_maps, shape_list): + assert len(score_maps) == len(self.scales) + boundaries = [] + for idx, score_map in enumerate(score_maps): + scale = self.scales[idx] + boundaries = boundaries + self._get_boundary_single(score_map, + scale) + + # nms + boundaries = poly_nms(boundaries, self.nms_thr) + boundaries, scores = self.resize_boundary( + boundaries, (1 / shape_list[0, 2:]).tolist()[::-1]) + + boxes_batch = [dict(points=boundaries, scores=scores)] + return boxes_batch + + def _get_boundary_single(self, score_map, scale): + assert len(score_map) == 2 + assert score_map[1].shape[1] == 4 * self.fourier_degree + 2 + + return self.fcenet_decode( + preds=score_map, + fourier_degree=self.fourier_degree, + num_reconstr_points=self.num_reconstr_points, + scale=scale, + alpha=self.alpha, + beta=self.beta, + box_type=self.box_type, + score_thr=self.score_thr, + nms_thr=self.nms_thr) + + def fcenet_decode(self, + preds, + fourier_degree, + num_reconstr_points, + scale, + alpha=1.0, + beta=2.0, + box_type='poly', + score_thr=0.3, + nms_thr=0.1): + """Decoding predictions of FCENet to instances. + + Args: + preds (list(Tensor)): The head output tensors. + fourier_degree (int): The maximum Fourier transform degree k. + num_reconstr_points (int): The points number of the polygon + reconstructed from predicted Fourier coefficients. + scale (int): The down-sample scale of the prediction. + alpha (float) : The parameter to calculate final scores. Score_{final} + = (Score_{text region} ^ alpha) + * (Score_{text center region}^ beta) + beta (float) : The parameter to calculate final score. + box_type (str): Boundary encoding type 'poly' or 'quad'. + score_thr (float) : The threshold used to filter out the final + candidates. + nms_thr (float) : The threshold of nms. + + Returns: + boundaries (list[list[float]]): The instance boundary and confidence + list. 
+ """ + assert isinstance(preds, list) + assert len(preds) == 2 + assert box_type in ['poly', 'quad'] + + cls_pred = preds[0][0] + tr_pred = cls_pred[0:2] + tcl_pred = cls_pred[2:] + + reg_pred = preds[1][0].transpose([1, 2, 0]) + x_pred = reg_pred[:, :, :2 * fourier_degree + 1] + y_pred = reg_pred[:, :, 2 * fourier_degree + 1:] + + score_pred = (tr_pred[1]**alpha) * (tcl_pred[1]**beta) + tr_pred_mask = (score_pred) > score_thr + tr_mask = fill_hole(tr_pred_mask) + + tr_contours, _ = cv2.findContours( + tr_mask.astype(np.uint8), cv2.RETR_TREE, + cv2.CHAIN_APPROX_SIMPLE) # opencv4 + + mask = np.zeros_like(tr_mask) + boundaries = [] + for cont in tr_contours: + deal_map = mask.copy().astype(np.int8) + cv2.drawContours(deal_map, [cont], -1, 1, -1) + + score_map = score_pred * deal_map + score_mask = score_map > 0 + xy_text = np.argwhere(score_mask) + dxy = xy_text[:, 1] + xy_text[:, 0] * 1j + + x, y = x_pred[score_mask], y_pred[score_mask] + c = x + y * 1j + c[:, fourier_degree] = c[:, fourier_degree] + dxy + c *= scale + + polygons = fourier2poly(c, num_reconstr_points) + score = score_map[score_mask].reshape(-1, 1) + polygons = poly_nms(np.hstack((polygons, score)).tolist(), nms_thr) + + boundaries = boundaries + polygons + + boundaries = poly_nms(boundaries, nms_thr) + + if box_type == 'quad': + new_boundaries = [] + for boundary in boundaries: + poly = np.array(boundary[:-1]).reshape(-1, 2).astype(np.float32) + score = boundary[-1] + points = cv2.boxPoints(cv2.minAreaRect(poly)) + points = np.int0(points) + new_boundaries.append(points.reshape(-1).tolist() + [score]) + boundaries = new_boundaries + + return boundaries diff --git a/modules/pytorchocr/postprocess/locality_aware_nms.py b/modules/pytorchocr/postprocess/locality_aware_nms.py new file mode 100644 index 0000000..53280cc --- /dev/null +++ b/modules/pytorchocr/postprocess/locality_aware_nms.py @@ -0,0 +1,199 @@ +""" +Locality aware nms. +""" + +import numpy as np +from shapely.geometry import Polygon + + +def intersection(g, p): + """ + Intersection. + """ + g = Polygon(g[:8].reshape((4, 2))) + p = Polygon(p[:8].reshape((4, 2))) + g = g.buffer(0) + p = p.buffer(0) + if not g.is_valid or not p.is_valid: + return 0 + inter = Polygon(g).intersection(Polygon(p)).area + union = g.area + p.area - inter + if union == 0: + return 0 + else: + return inter / union + + +def intersection_iog(g, p): + """ + Intersection_iog. + """ + g = Polygon(g[:8].reshape((4, 2))) + p = Polygon(p[:8].reshape((4, 2))) + if not g.is_valid or not p.is_valid: + return 0 + inter = Polygon(g).intersection(Polygon(p)).area + #union = g.area + p.area - inter + union = p.area + if union == 0: + print("p_area is very small") + return 0 + else: + return inter / union + + +def weighted_merge(g, p): + """ + Weighted merge. + """ + g[:8] = (g[8] * g[:8] + p[8] * p[:8]) / (g[8] + p[8]) + g[8] = (g[8] + p[8]) + return g + + +def standard_nms(S, thres): + """ + Standard nms. + """ + order = np.argsort(S[:, 8])[::-1] + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + ovr = np.array([intersection(S[i], S[t]) for t in order[1:]]) + + inds = np.where(ovr <= thres)[0] + order = order[inds + 1] + + return S[keep] + + +def standard_nms_inds(S, thres): + """ + Standard nms, retun inds. 
+ """ + order = np.argsort(S[:, 8])[::-1] + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + ovr = np.array([intersection(S[i], S[t]) for t in order[1:]]) + + inds = np.where(ovr <= thres)[0] + order = order[inds + 1] + + return keep + + +def nms(S, thres): + """ + nms. + """ + order = np.argsort(S[:, 8])[::-1] + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + ovr = np.array([intersection(S[i], S[t]) for t in order[1:]]) + + inds = np.where(ovr <= thres)[0] + order = order[inds + 1] + + return keep + + +def soft_nms(boxes_in, Nt_thres=0.3, threshold=0.8, sigma=0.5, method=2): + """ + soft_nms + :para boxes_in, N x 9 (coords + score) + :para threshould, eliminate cases min score(0.001) + :para Nt_thres, iou_threshi + :para sigma, gaussian weght + :method, linear or gaussian + """ + boxes = boxes_in.copy() + N = boxes.shape[0] + if N is None or N < 1: + return np.array([]) + pos, maxpos = 0, 0 + weight = 0.0 + inds = np.arange(N) + tbox, sbox = boxes[0].copy(), boxes[0].copy() + for i in range(N): + maxscore = boxes[i, 8] + maxpos = i + tbox = boxes[i].copy() + ti = inds[i] + pos = i + 1 + #get max box + while pos < N: + if maxscore < boxes[pos, 8]: + maxscore = boxes[pos, 8] + maxpos = pos + pos = pos + 1 + #add max box as a detection + boxes[i, :] = boxes[maxpos, :] + inds[i] = inds[maxpos] + #swap + boxes[maxpos, :] = tbox + inds[maxpos] = ti + tbox = boxes[i].copy() + pos = i + 1 + #NMS iteration + while pos < N: + sbox = boxes[pos].copy() + ts_iou_val = intersection(tbox, sbox) + if ts_iou_val > 0: + if method == 1: + if ts_iou_val > Nt_thres: + weight = 1 - ts_iou_val + else: + weight = 1 + elif method == 2: + weight = np.exp(-1.0 * ts_iou_val**2 / sigma) + else: + if ts_iou_val > Nt_thres: + weight = 0 + else: + weight = 1 + boxes[pos, 8] = weight * boxes[pos, 8] + #if box score falls below thresold, discard the box by + #swaping last box update N + if boxes[pos, 8] < threshold: + boxes[pos, :] = boxes[N - 1, :] + inds[pos] = inds[N - 1] + N = N - 1 + pos = pos - 1 + pos = pos + 1 + + return boxes[:N] + + +def nms_locality(polys, thres=0.3): + """ + locality aware nms of EAST + :param polys: a N*9 numpy array. first 8 coordinates, then prob + :return: boxes after nms + """ + S = [] + p = None + for g in polys: + if p is not None and intersection(g, p) > thres: + p = weighted_merge(g, p) + else: + if p is not None: + S.append(p) + p = g + if p is not None: + S.append(p) + + if len(S) == 0: + return np.array([]) + return standard_nms(np.array(S), thres) + + +if __name__ == '__main__': + # 343,350,448,135,474,143,369,359 + print( + Polygon(np.array([[343, 350], [448, 135], [474, 143], [369, 359]])) + .area) \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/pg_postprocess.py b/modules/pytorchocr/postprocess/pg_postprocess.py new file mode 100644 index 0000000..81a8e08 --- /dev/null +++ b/modules/pytorchocr/postprocess/pg_postprocess.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +__dir__ = os.path.dirname(__file__) +sys.path.append(__dir__) +sys.path.append(os.path.join(__dir__, '..')) +from pytorchocr.utils.e2e_utils.pgnet_pp_utils import PGNet_PostProcess + + +class PGPostProcess(object): + """ + The post process for PGNet. + """ + + def __init__(self, character_dict_path, valid_set, score_thresh, mode, + **kwargs): + self.character_dict_path = character_dict_path + self.valid_set = valid_set + self.score_thresh = score_thresh + self.mode = mode + + # c++ la-nms is faster, but only support python 3.5 + self.is_python35 = False + if sys.version_info.major == 3 and sys.version_info.minor == 5: + self.is_python35 = True + + def __call__(self, outs_dict, shape_list): + post = PGNet_PostProcess(self.character_dict_path, self.valid_set, + self.score_thresh, outs_dict, shape_list) + if self.mode == 'fast': + data = post.pg_postprocess_fast() + else: + data = post.pg_postprocess_slow() + return data diff --git a/modules/pytorchocr/postprocess/pse_postprocess/__init__.py b/modules/pytorchocr/postprocess/pse_postprocess/__init__.py new file mode 100644 index 0000000..680473b --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .pse_postprocess import PSEPostProcess \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/pse_postprocess/pse/README.md b/modules/pytorchocr/postprocess/pse_postprocess/pse/README.md new file mode 100644 index 0000000..6a19d5d --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/pse/README.md @@ -0,0 +1,6 @@ +## 编译 +This code is refer from: +https://github.com/whai362/PSENet/blob/python3/models/post_processing/pse +```python +python3 setup.py build_ext --inplace +``` diff --git a/modules/pytorchocr/postprocess/pse_postprocess/pse/__init__.py b/modules/pytorchocr/postprocess/pse_postprocess/pse/__init__.py new file mode 100644 index 0000000..ce0142f --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/pse/__init__.py @@ -0,0 +1,29 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
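+
+# NOTE: importing this package compiles the Cython extension in-place as a
+# side effect, and the os.chdir() below assumes the process was started
+# from the directory that contains `pytorchocr/`. When running from
+# elsewhere, the extension can be built once by hand, as in the pse README:
+#
+#   cd modules/pytorchocr/postprocess/pse_postprocess/pse
+#   python3 setup.py build_ext --inplace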
+import sys +import os +import subprocess + +python_path = sys.executable + +ori_path = os.getcwd() +os.chdir('pytorchocr/postprocess/pse_postprocess/pse') +if subprocess.call( + '{} setup.py build_ext --inplace'.format(python_path), shell=True) != 0: + raise RuntimeError( + 'Cannot compile pse: {}, if your system is windows, you need to install all the default components of `desktop development using C++` in visual studio 2019+'. + format(os.path.dirname(os.path.realpath(__file__)))) +os.chdir(ori_path) + +from .pse import pse diff --git a/modules/pytorchocr/postprocess/pse_postprocess/pse/pse.pyx b/modules/pytorchocr/postprocess/pse_postprocess/pse/pse.pyx new file mode 100644 index 0000000..b2be49e --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/pse/pse.pyx @@ -0,0 +1,70 @@ + +import numpy as np +import cv2 +cimport numpy as np +cimport cython +cimport libcpp +cimport libcpp.pair +cimport libcpp.queue +from libcpp.pair cimport * +from libcpp.queue cimport * + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef np.ndarray[np.int32_t, ndim=2] _pse(np.ndarray[np.uint8_t, ndim=3] kernels, + np.ndarray[np.int32_t, ndim=2] label, + int kernel_num, + int label_num, + float min_area=0): + cdef np.ndarray[np.int32_t, ndim=2] pred + pred = np.zeros((label.shape[0], label.shape[1]), dtype=np.int32) + + for label_idx in range(1, label_num): + if np.sum(label == label_idx) < min_area: + label[label == label_idx] = 0 + + cdef libcpp.queue.queue[libcpp.pair.pair[np.int16_t,np.int16_t]] que = \ + queue[libcpp.pair.pair[np.int16_t,np.int16_t]]() + cdef libcpp.queue.queue[libcpp.pair.pair[np.int16_t,np.int16_t]] nxt_que = \ + queue[libcpp.pair.pair[np.int16_t,np.int16_t]]() + cdef np.int16_t* dx = [-1, 1, 0, 0] + cdef np.int16_t* dy = [0, 0, -1, 1] + cdef np.int16_t tmpx, tmpy + + points = np.array(np.where(label > 0)).transpose((1, 0)) + for point_idx in range(points.shape[0]): + tmpx, tmpy = points[point_idx, 0], points[point_idx, 1] + que.push(pair[np.int16_t,np.int16_t](tmpx, tmpy)) + pred[tmpx, tmpy] = label[tmpx, tmpy] + + cdef libcpp.pair.pair[np.int16_t,np.int16_t] cur + cdef int cur_label + for kernel_idx in range(kernel_num - 1, -1, -1): + while not que.empty(): + cur = que.front() + que.pop() + cur_label = pred[cur.first, cur.second] + + is_edge = True + for j in range(4): + tmpx = cur.first + dx[j] + tmpy = cur.second + dy[j] + if tmpx < 0 or tmpx >= label.shape[0] or tmpy < 0 or tmpy >= label.shape[1]: + continue + if kernels[kernel_idx, tmpx, tmpy] == 0 or pred[tmpx, tmpy] > 0: + continue + + que.push(pair[np.int16_t,np.int16_t](tmpx, tmpy)) + pred[tmpx, tmpy] = cur_label + is_edge = False + if is_edge: + nxt_que.push(cur) + + que, nxt_que = nxt_que, que + + return pred + +def pse(kernels, min_area): + kernel_num = kernels.shape[0] + label_num, label = cv2.connectedComponents(kernels[-1], connectivity=4) + return _pse(kernels[:-1], label, kernel_num, label_num, min_area) \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/pse_postprocess/pse/setup.py b/modules/pytorchocr/postprocess/pse_postprocess/pse/setup.py new file mode 100644 index 0000000..0374678 --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/pse/setup.py @@ -0,0 +1,14 @@ +from distutils.core import setup, Extension +from Cython.Build import cythonize +import numpy + +setup(ext_modules=cythonize(Extension( + 'pse', + sources=['pse.pyx'], + language='c++', + include_dirs=[numpy.get_include()], + library_dirs=[], + libraries=[], + extra_compile_args=['-O3'], + 
extra_link_args=[] +))) diff --git a/modules/pytorchocr/postprocess/pse_postprocess/pse_postprocess.py b/modules/pytorchocr/postprocess/pse_postprocess/pse_postprocess.py new file mode 100644 index 0000000..531bbcf --- /dev/null +++ b/modules/pytorchocr/postprocess/pse_postprocess/pse_postprocess.py @@ -0,0 +1,105 @@ +""" +This code is refer from: +https://github.com/whai362/PSENet/blob/python3/models/head/psenet_head.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import cv2 +import torch +from torch.nn import functional as F + +from pytorchocr.postprocess.pse_postprocess.pse import pse + + +class PSEPostProcess(object): + """ + The post process for PSE. + """ + + def __init__(self, + thresh=0.5, + box_thresh=0.85, + min_area=16, + box_type='box', + scale=4, + **kwargs): + assert box_type in ['box', 'poly'], 'Only box and poly is supported' + self.thresh = thresh + self.box_thresh = box_thresh + self.min_area = min_area + self.box_type = box_type + self.scale = scale + + def __call__(self, outs_dict, shape_list): + pred = outs_dict['maps'] + if not isinstance(pred, torch.Tensor): + pred = torch.as_tensor(pred) + pred = F.interpolate( + pred, scale_factor=4 // self.scale, mode='bilinear') + + score = F.sigmoid(pred[:, 0, :, :]) + + kernels = (pred > self.thresh).type(torch.float32) + text_mask = kernels[:, 0, :, :] + kernels[:, 0:, :, :] = kernels[:, 0:, :, :] * text_mask + + score = score.numpy() + kernels = kernels.numpy().astype(np.uint8) + + boxes_batch = [] + for batch_index in range(pred.shape[0]): + boxes, scores = self.boxes_from_bitmap(score[batch_index], + kernels[batch_index], + shape_list[batch_index]) + + boxes_batch.append({'points': boxes, 'scores': scores}) + return boxes_batch + + def boxes_from_bitmap(self, score, kernels, shape): + label = pse(kernels, self.min_area) + return self.generate_box(score, label, shape) + + def generate_box(self, score, label, shape): + src_h, src_w, ratio_h, ratio_w = shape + label_num = np.max(label) + 1 + + boxes = [] + scores = [] + for i in range(1, label_num): + ind = label == i + points = np.array(np.where(ind)).transpose((1, 0))[:, ::-1] + + if points.shape[0] < self.min_area: + label[ind] = 0 + continue + + score_i = np.mean(score[ind]) + if score_i < self.box_thresh: + label[ind] = 0 + continue + + if self.box_type == 'box': + rect = cv2.minAreaRect(points) + bbox = cv2.boxPoints(rect) + elif self.box_type == 'poly': + box_height = np.max(points[:, 1]) + 10 + box_width = np.max(points[:, 0]) + 10 + + mask = np.zeros((box_height, box_width), np.uint8) + mask[points[:, 1], points[:, 0]] = 255 + + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, + cv2.CHAIN_APPROX_SIMPLE) + bbox = np.squeeze(contours[0], 1) + else: + raise NotImplementedError + + bbox[:, 0] = np.clip(np.round(bbox[:, 0] / ratio_w), 0, src_w) + bbox[:, 1] = np.clip(np.round(bbox[:, 1] / ratio_h), 0, src_h) + boxes.append(bbox) + scores.append(score_i) + return boxes, scores diff --git a/modules/pytorchocr/postprocess/rec_postprocess.py b/modules/pytorchocr/postprocess/rec_postprocess.py new file mode 100644 index 0000000..b163a5a --- /dev/null +++ b/modules/pytorchocr/postprocess/rec_postprocess.py @@ -0,0 +1,693 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np +import string +#import paddle +# from paddle.nn import functional as F +import torch + + +class BaseRecLabelDecode(object): + """ Convert between text-label and text-index """ + + def __init__(self, + character_dict_path=None, + use_space_char=False): + + self.beg_str = "sos" + self.end_str = "eos" + + self.character_str = [] + if character_dict_path is None: + self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz" + dict_character = list(self.character_str) + else: + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + for line in lines: + line = line.decode('utf-8').strip("\n").strip("\r\n") + self.character_str.append(line) + if use_space_char: + self.character_str.append(" ") + dict_character = list(self.character_str) + + dict_character = self.add_special_char(dict_character) + self.dict = {} + for i, char in enumerate(dict_character): + self.dict[char] = i + self.character = dict_character + + def add_special_char(self, dict_character): + return dict_character + + def decode(self, text_index, text_prob=None, is_remove_duplicate=False): + """ convert text-index into text-label. """ + result_list = [] + ignored_tokens = self.get_ignored_tokens() + batch_size = len(text_index) + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + text = ''.join(char_list) + result_list.append((text, np.mean(conf_list))) + return result_list + + def get_ignored_tokens(self): + return [0] # for ctc blank + + +class CTCLabelDecode(BaseRecLabelDecode): + """ Convert between text-label and text-index """ + + def __init__(self, + character_dict_path=None, + use_space_char=False, + **kwargs): + super(CTCLabelDecode, self).__init__(character_dict_path, + use_space_char) + + def __call__(self, preds, label=None, *args, **kwargs): + if isinstance(preds, torch.Tensor): + preds = preds.numpy() + preds_idx = preds.argmax(axis=2) + preds_prob = preds.max(axis=2) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True) + + if label is None: + return text + label = self.decode(label) + return text, label + + def add_special_char(self, dict_character): + dict_character = ['blank'] + dict_character + return dict_character + + +class NRTRLabelDecode(BaseRecLabelDecode): + """ Convert between text-label and text-index """ + + def __init__(self, character_dict_path=None, use_space_char=True, **kwargs): + super(NRTRLabelDecode, self).__init__(character_dict_path, + use_space_char) + + def __call__(self, preds, label=None, *args, **kwargs): + + if len(preds) == 2: + preds_id = preds[0] + preds_prob = preds[1] + if isinstance(preds_id, torch.Tensor): + preds_id = preds_id.numpy() + if isinstance(preds_prob, torch.Tensor): + 
preds_prob = preds_prob.numpy()
+            if preds_id[0][0] == 2:
+                # strip the leading '<s>' token (index 2)
+                preds_idx = preds_id[:, 1:]
+                preds_prob = preds_prob[:, 1:]
+            else:
+                preds_idx = preds_id
+            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+            if label is None:
+                return text
+            label = self.decode(label[:, 1:])
+        else:
+            if isinstance(preds, torch.Tensor):
+                preds = preds.numpy()
+            preds_idx = preds.argmax(axis=2)
+            preds_prob = preds.max(axis=2)
+            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+            if label is None:
+                return text
+            label = self.decode(label[:, 1:])
+        return text, label
+
+    def add_special_char(self, dict_character):
+        dict_character = ['blank', '<unk>', '<s>', '</s>'] + dict_character
+        return dict_character
+
+    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+        """ convert text-index into text-label. """
+        result_list = []
+        batch_size = len(text_index)
+        for batch_idx in range(batch_size):
+            char_list = []
+            conf_list = []
+            for idx in range(len(text_index[batch_idx])):
+                try:
+                    char_idx = self.character[int(text_index[batch_idx][idx])]
+                except IndexError:
+                    continue
+                if char_idx == '</s>':  # end token
+                    break
+                char_list.append(char_idx)
+                if text_prob is not None:
+                    conf_list.append(text_prob[batch_idx][idx])
+                else:
+                    conf_list.append(1)
+            text = ''.join(char_list)
+            result_list.append((text.lower(), np.mean(conf_list).tolist()))
+        return result_list
+
+class ViTSTRLabelDecode(NRTRLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self, character_dict_path=None, use_space_char=False,
+                 **kwargs):
+        super(ViTSTRLabelDecode, self).__init__(character_dict_path,
+                                                use_space_char)
+
+    def __call__(self, preds, label=None, *args, **kwargs):
+        if isinstance(preds, torch.Tensor):
+            preds = preds[:, 1:].numpy()
+        else:
+            preds = preds[:, 1:]
+        preds_idx = preds.argmax(axis=2)
+        preds_prob = preds.max(axis=2)
+        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
+        if label is None:
+            return text
+        label = self.decode(label[:, 1:])
+        return text, label
+
+    def add_special_char(self, dict_character):
+        dict_character = ['<s>', '</s>'] + dict_character
+        return dict_character
+
+
+class AttnLabelDecode(BaseRecLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self,
+                 character_dict_path=None,
+                 use_space_char=False,
+                 **kwargs):
+        super(AttnLabelDecode, self).__init__(character_dict_path,
+                                              use_space_char)
+
+    def add_special_char(self, dict_character):
+        self.beg_str = "sos"
+        self.end_str = "eos"
+        dict_character = [self.beg_str] + dict_character + [self.end_str]
+        return dict_character
+
+    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+        """ convert text-index into text-label.
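+        Decoding stops at the first end token ('eos'); the begin/end
+        indices returned by get_ignored_tokens() are dropped from the text.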
""" + result_list = [] + ignored_tokens = self.get_ignored_tokens() + [beg_idx, end_idx] = self.get_ignored_tokens() + batch_size = len(text_index) + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if int(text_index[batch_idx][idx]) == int(end_idx): + break + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + text = ''.join(char_list) + result_list.append((text, np.mean(conf_list))) + return result_list + + def __call__(self, preds, label=None, *args, **kwargs): + """ + text = self.decode(text) + if label is None: + return text + else: + label = self.decode(label, is_remove_duplicate=False) + return text, label + """ + if isinstance(preds, torch.Tensor): + preds = preds.cpu().numpy() + + preds_idx = preds.argmax(axis=2) + preds_prob = preds.max(axis=2) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) + if label is None: + return text + label = self.decode(label, is_remove_duplicate=False) + return text, label + + def get_ignored_tokens(self): + beg_idx = self.get_beg_end_flag_idx("beg") + end_idx = self.get_beg_end_flag_idx("end") + return [beg_idx, end_idx] + + def get_beg_end_flag_idx(self, beg_or_end): + if beg_or_end == "beg": + idx = np.array(self.dict[self.beg_str]) + elif beg_or_end == "end": + idx = np.array(self.dict[self.end_str]) + else: + assert False, "unsupport type %s in get_beg_end_flag_idx" \ + % beg_or_end + return idx + + +class RFLLabelDecode(BaseRecLabelDecode): + """ Convert between text-label and text-index """ + + def __init__(self, character_dict_path=None, use_space_char=False, + **kwargs): + super(RFLLabelDecode, self).__init__(character_dict_path, + use_space_char) + + def add_special_char(self, dict_character): + self.beg_str = "sos" + self.end_str = "eos" + dict_character = dict_character + dict_character = [self.beg_str] + dict_character + [self.end_str] + return dict_character + + def decode(self, text_index, text_prob=None, is_remove_duplicate=False): + """ convert text-index into text-label. 
""" + result_list = [] + ignored_tokens = self.get_ignored_tokens() + [beg_idx, end_idx] = self.get_ignored_tokens() + batch_size = len(text_index) + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if int(text_index[batch_idx][idx]) == int(end_idx): + break + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + text = ''.join(char_list) + result_list.append((text, np.mean(conf_list).tolist())) + return result_list + + def __call__(self, preds, label=None, *args, **kwargs): + # if seq_outputs is not None: + if isinstance(preds, tuple) or isinstance(preds, list): + cnt_outputs, seq_outputs = preds + if isinstance(seq_outputs, torch.Tensor): + seq_outputs = seq_outputs.numpy() + preds_idx = seq_outputs.argmax(axis=2) + preds_prob = seq_outputs.max(axis=2) + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) + + if label is None: + return text + label = self.decode(label, is_remove_duplicate=False) + return text, label + + else: + cnt_outputs = preds + if isinstance(cnt_outputs, torch.Tensor): + cnt_outputs = cnt_outputs.numpy() + cnt_length = [] + for lens in cnt_outputs: + length = round(np.sum(lens)) + cnt_length.append(length) + if label is None: + return cnt_length + label = self.decode(label, is_remove_duplicate=False) + length = [len(res[0]) for res in label] + return cnt_length, length + + def get_ignored_tokens(self): + beg_idx = self.get_beg_end_flag_idx("beg") + end_idx = self.get_beg_end_flag_idx("end") + return [beg_idx, end_idx] + + def get_beg_end_flag_idx(self, beg_or_end): + if beg_or_end == "beg": + idx = np.array(self.dict[self.beg_str]) + elif beg_or_end == "end": + idx = np.array(self.dict[self.end_str]) + else: + assert False, "unsupport type %s in get_beg_end_flag_idx" \ + % beg_or_end + return idx + + +class SRNLabelDecode(BaseRecLabelDecode): + """ Convert between text-label and text-index """ + + def __init__(self, + character_dict_path=None, + use_space_char=False, + **kwargs): + self.max_text_length = kwargs.get('max_text_length', 25) + super(SRNLabelDecode, self).__init__(character_dict_path, + use_space_char) + + def __call__(self, preds, label=None, *args, **kwargs): + pred = preds['predict'] + char_num = len(self.character_str) + 2 + if isinstance(pred, torch.Tensor): + pred = pred.numpy() + pred = np.reshape(pred, [-1, char_num]) + + preds_idx = np.argmax(pred, axis=1) + preds_prob = np.max(pred, axis=1) + + preds_idx = np.reshape(preds_idx, [-1, self.max_text_length]) + + preds_prob = np.reshape(preds_prob, [-1, self.max_text_length]) + + text = self.decode(preds_idx, preds_prob) + + if label is None: + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) + return text + label = self.decode(label) + return text, label + + def decode(self, text_index, text_prob=None, is_remove_duplicate=False): + """ convert text-index into text-label. 
""" + result_list = [] + ignored_tokens = self.get_ignored_tokens() + batch_size = len(text_index) + + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + + text = ''.join(char_list) + result_list.append((text, np.mean(conf_list))) + return result_list + + def add_special_char(self, dict_character): + dict_character = dict_character + [self.beg_str, self.end_str] + return dict_character + + def get_ignored_tokens(self): + beg_idx = self.get_beg_end_flag_idx("beg") + end_idx = self.get_beg_end_flag_idx("end") + return [beg_idx, end_idx] + + def get_beg_end_flag_idx(self, beg_or_end): + if beg_or_end == "beg": + idx = np.array(self.dict[self.beg_str]) + elif beg_or_end == "end": + idx = np.array(self.dict[self.end_str]) + else: + assert False, "unsupport type %s in get_beg_end_flag_idx" \ + % beg_or_end + return idx + + +class TableLabelDecode(object): + """ """ + + def __init__(self, + character_dict_path, + **kwargs): + list_character, list_elem = self.load_char_elem_dict(character_dict_path) + list_character = self.add_special_char(list_character) + list_elem = self.add_special_char(list_elem) + self.dict_character = {} + self.dict_idx_character = {} + for i, char in enumerate(list_character): + self.dict_idx_character[i] = char + self.dict_character[char] = i + self.dict_elem = {} + self.dict_idx_elem = {} + for i, elem in enumerate(list_elem): + self.dict_idx_elem[i] = elem + self.dict_elem[elem] = i + + def load_char_elem_dict(self, character_dict_path): + list_character = [] + list_elem = [] + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + substr = lines[0].decode('utf-8').strip("\n").strip("\r\n").split("\t") + character_num = int(substr[0]) + elem_num = int(substr[1]) + for cno in range(1, 1 + character_num): + character = lines[cno].decode('utf-8').strip("\n").strip("\r\n") + list_character.append(character) + for eno in range(1 + character_num, 1 + character_num + elem_num): + elem = lines[eno].decode('utf-8').strip("\n").strip("\r\n") + list_elem.append(elem) + return list_character, list_elem + + def add_special_char(self, list_character): + self.beg_str = "sos" + self.end_str = "eos" + list_character = [self.beg_str] + list_character + [self.end_str] + return list_character + + def __call__(self, preds): + structure_probs = preds['structure_probs'] + loc_preds = preds['loc_preds'] + if isinstance(structure_probs,torch.Tensor): + structure_probs = structure_probs.numpy() + if isinstance(loc_preds,torch.Tensor): + loc_preds = loc_preds.numpy() + structure_idx = structure_probs.argmax(axis=2) + structure_probs = structure_probs.max(axis=2) + structure_str, structure_pos, result_score_list, result_elem_idx_list = self.decode(structure_idx, + structure_probs, 'elem') + res_html_code_list = [] + res_loc_list = [] + batch_num = len(structure_str) + for bno in range(batch_num): + res_loc = [] + for sno in range(len(structure_str[bno])): + text = structure_str[bno][sno] + if text in ['', ' 0 and tmp_elem_idx == end_idx: + break + if tmp_elem_idx in ignored_tokens: + continue + + char_list.append(current_dict[tmp_elem_idx]) + 
+                elem_pos_list.append(idx)
+                score_list.append(structure_probs[batch_idx, idx])
+                elem_idx_list.append(tmp_elem_idx)
+            result_list.append(char_list)
+            result_pos_list.append(elem_pos_list)
+            result_score_list.append(score_list)
+            result_elem_idx_list.append(elem_idx_list)
+        return result_list, result_pos_list, result_score_list, result_elem_idx_list
+
+    def get_ignored_tokens(self, char_or_elem):
+        beg_idx = self.get_beg_end_flag_idx("beg", char_or_elem)
+        end_idx = self.get_beg_end_flag_idx("end", char_or_elem)
+        return [beg_idx, end_idx]
+
+    def get_beg_end_flag_idx(self, beg_or_end, char_or_elem):
+        if char_or_elem == "char":
+            if beg_or_end == "beg":
+                idx = self.dict_character[self.beg_str]
+            elif beg_or_end == "end":
+                idx = self.dict_character[self.end_str]
+            else:
+                assert False, "Unsupported type %s in get_beg_end_flag_idx of char" \
+                              % beg_or_end
+        elif char_or_elem == "elem":
+            if beg_or_end == "beg":
+                idx = self.dict_elem[self.beg_str]
+            elif beg_or_end == "end":
+                idx = self.dict_elem[self.end_str]
+            else:
+                assert False, "Unsupported type %s in get_beg_end_flag_idx of elem" \
+                              % beg_or_end
+        else:
+            assert False, "Unsupported type %s in char_or_elem" \
+                          % char_or_elem
+        return idx
+
+
+class SARLabelDecode(BaseRecLabelDecode):
+    """ Convert between text-label and text-index """
+
+    def __init__(self, character_dict_path=None, use_space_char=False,
+                 **kwargs):
+        super(SARLabelDecode, self).__init__(character_dict_path,
+                                             use_space_char)
+
+        self.rm_symbol = kwargs.get('rm_symbol', False)
+
+    def add_special_char(self, dict_character):
+        beg_end_str = "<BOS/EOS>"
+        unknown_str = "<UKN>"
+        padding_str = "<PAD>"
+        dict_character = dict_character + [unknown_str]
+        self.unknown_idx = len(dict_character) - 1
+        dict_character = dict_character + [beg_end_str]
+        self.start_idx = len(dict_character) - 1
+        self.end_idx = len(dict_character) - 1
+        dict_character = dict_character + [padding_str]
+        self.padding_idx = len(dict_character) - 1
+        return dict_character
+
+    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+        """ convert text-index into text-label. """
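+        # stop at the first '<BOS/EOS>' token; a leading end token is
+        # skipped (instead of terminating) when no probabilities are given,
+        # i.e. when decoding ground-truth labels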
""" + result_list = [] + ignored_tokens = self.get_ignored_tokens() + + batch_size = len(text_index) + for batch_idx in range(batch_size): + char_list = [] + conf_list = [] + for idx in range(len(text_index[batch_idx])): + if text_index[batch_idx][idx] in ignored_tokens: + continue + if int(text_index[batch_idx][idx]) == int(self.end_idx): + if text_prob is None and idx == 0: + continue + else: + break + if is_remove_duplicate: + # only for predict + if idx > 0 and text_index[batch_idx][idx - 1] == text_index[ + batch_idx][idx]: + continue + char_list.append(self.character[int(text_index[batch_idx][ + idx])]) + if text_prob is not None: + conf_list.append(text_prob[batch_idx][idx]) + else: + conf_list.append(1) + text = ''.join(char_list) + if self.rm_symbol: + comp = re.compile('[^A-Z^a-z^0-9^\u4e00-\u9fa5]') + text = text.lower() + text = comp.sub('', text) + result_list.append((text, np.mean(conf_list).tolist())) + return result_list + + def __call__(self, preds, label=None, *args, **kwargs): + if isinstance(preds, torch.Tensor): + preds = preds.cpu().numpy() + preds_idx = preds.argmax(axis=2) + preds_prob = preds.max(axis=2) + + text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False) + + if label is None: + return text + label = self.decode(label, is_remove_duplicate=False) + return text, label + + def get_ignored_tokens(self): + return [self.padding_idx] + + +class CANLabelDecode(BaseRecLabelDecode): + """ Convert between latex-symbol and symbol-index """ + + def __init__(self, character_dict_path=None, use_space_char=False, + **kwargs): + super(CANLabelDecode, self).__init__(character_dict_path, + use_space_char) + + def decode(self, text_index, preds_prob=None): + result_list = [] + batch_size = len(text_index) + for batch_idx in range(batch_size): + seq_end = text_index[batch_idx].argmin(0) + idx_list = text_index[batch_idx][:seq_end].tolist() + symbol_list = [self.character[idx] for idx in idx_list] + probs = [] + if preds_prob is not None: + probs = preds_prob[batch_idx][:len(symbol_list)].tolist() + + result_list.append([' '.join(symbol_list), probs]) + return result_list + + def __call__(self, preds, label=None, *args, **kwargs): + pred_prob, _, _, _ = preds + preds_idx = pred_prob.argmax(axis=2) + + text = self.decode(preds_idx) + if label is None: + return text + label = self.decode(label) + return text, label \ No newline at end of file diff --git a/modules/pytorchocr/postprocess/sast_postprocess.py b/modules/pytorchocr/postprocess/sast_postprocess.py new file mode 100644 index 0000000..26a03c8 --- /dev/null +++ b/modules/pytorchocr/postprocess/sast_postprocess.py @@ -0,0 +1,302 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +__dir__ = os.path.dirname(__file__) +sys.path.append(__dir__) +sys.path.append(os.path.join(__dir__, '..')) + +import numpy as np +from .locality_aware_nms import nms_locality +# import paddle +import torch +import cv2 +import time + + +class SASTPostProcess(object): + """ + The post process for SAST. + """ + + def __init__(self, + score_thresh=0.5, + nms_thresh=0.2, + sample_pts_num=2, + shrink_ratio_of_width=0.3, + expand_scale=1.0, + tcl_map_thresh=0.5, + **kwargs): + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.sample_pts_num = sample_pts_num + self.shrink_ratio_of_width = shrink_ratio_of_width + self.expand_scale = expand_scale + self.tcl_map_thresh = tcl_map_thresh + + # c++ la-nms is faster, but only support python 3.5 + self.is_python35 = False + if sys.version_info.major == 3 and sys.version_info.minor == 5: + self.is_python35 = True + + def point_pair2poly(self, point_pair_list): + """ + Transfer vertical point_pairs into poly point in clockwise. + """ + # constract poly + point_num = len(point_pair_list) * 2 + point_list = [0] * point_num + for idx, point_pair in enumerate(point_pair_list): + point_list[idx] = point_pair[0] + point_list[point_num - 1 - idx] = point_pair[1] + return np.array(point_list).reshape(-1, 2) + + def shrink_quad_along_width(self, quad, begin_width_ratio=0., end_width_ratio=1.): + """ + Generate shrink_quad_along_width. + """ + ratio_pair = np.array([[begin_width_ratio], [end_width_ratio]], dtype=np.float32) + p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair + p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair + return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]]) + + def expand_poly_along_width(self, poly, shrink_ratio_of_width=0.3): + """ + expand poly along width. + """ + point_num = poly.shape[0] + left_quad = np.array([poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32) + left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \ + (np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6) + left_quad_expand = self.shrink_quad_along_width(left_quad, left_ratio, 1.0) + right_quad = np.array([poly[point_num // 2 - 2], poly[point_num // 2 - 1], + poly[point_num // 2], poly[point_num // 2 + 1]], dtype=np.float32) + right_ratio = 1.0 + \ + shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \ + (np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6) + right_quad_expand = self.shrink_quad_along_width(right_quad, 0.0, right_ratio) + poly[0] = left_quad_expand[0] + poly[-1] = left_quad_expand[-1] + poly[point_num // 2 - 1] = right_quad_expand[1] + poly[point_num // 2] = right_quad_expand[2] + return poly + + def restore_quad(self, tcl_map, tcl_map_thresh, tvo_map): + """Restore quad.""" + xy_text = np.argwhere(tcl_map[:, :, 0] > tcl_map_thresh) + xy_text = xy_text[:, ::-1] # (n, 2) + + # Sort the text boxes via the y axis + xy_text = xy_text[np.argsort(xy_text[:, 1])] + + scores = tcl_map[xy_text[:, 1], xy_text[:, 0], 0] + scores = scores[:, np.newaxis] + + # Restore + point_num = int(tvo_map.shape[-1] / 2) + assert point_num == 4 + tvo_map = tvo_map[xy_text[:, 1], xy_text[:, 0], :] + xy_text_tile = np.tile(xy_text, (1, point_num)) # (n, point_num * 2) + quads = xy_text_tile - tvo_map + + return scores, quads, xy_text + + def quad_area(self, quad): + """ + compute area of a quad. 
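+        Shoelace formula; the value is signed, which is why detect_sast
+        negates it before using it as an area.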
+ """ + edge = [ + (quad[1][0] - quad[0][0]) * (quad[1][1] + quad[0][1]), + (quad[2][0] - quad[1][0]) * (quad[2][1] + quad[1][1]), + (quad[3][0] - quad[2][0]) * (quad[3][1] + quad[2][1]), + (quad[0][0] - quad[3][0]) * (quad[0][1] + quad[3][1]) + ] + return np.sum(edge) / 2. + + def nms(self, dets): + if self.is_python35: + import lanms + dets = lanms.merge_quadrangle_n9(dets, self.nms_thresh) + else: + dets = nms_locality(dets, self.nms_thresh) + return dets + + def cluster_by_quads_tco(self, tcl_map, tcl_map_thresh, quads, tco_map): + """ + Cluster pixels in tcl_map based on quads. + """ + instance_count = quads.shape[0] + 1 # contain background + instance_label_map = np.zeros(tcl_map.shape[:2], dtype=np.int32) + if instance_count == 1: + return instance_count, instance_label_map + + # predict text center + xy_text = np.argwhere(tcl_map[:, :, 0] > tcl_map_thresh) + n = xy_text.shape[0] + xy_text = xy_text[:, ::-1] # (n, 2) + tco = tco_map[xy_text[:, 1], xy_text[:, 0], :] # (n, 2) + pred_tc = xy_text - tco + + # get gt text center + m = quads.shape[0] + gt_tc = np.mean(quads, axis=1) # (m, 2) + + pred_tc_tile = np.tile(pred_tc[:, np.newaxis, :], (1, m, 1)) # (n, m, 2) + gt_tc_tile = np.tile(gt_tc[np.newaxis, :, :], (n, 1, 1)) # (n, m, 2) + dist_mat = np.linalg.norm(pred_tc_tile - gt_tc_tile, axis=2) # (n, m) + xy_text_assign = np.argmin(dist_mat, axis=1) + 1 # (n,) + + instance_label_map[xy_text[:, 1], xy_text[:, 0]] = xy_text_assign + return instance_count, instance_label_map + + def estimate_sample_pts_num(self, quad, xy_text): + """ + Estimate sample points number. + """ + eh = (np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(quad[1] - quad[2])) / 2.0 + ew = (np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(quad[2] - quad[3])) / 2.0 + + dense_sample_pts_num = max(2, int(ew)) + dense_xy_center_line = xy_text[np.linspace(0, xy_text.shape[0] - 1, dense_sample_pts_num, + endpoint=True, dtype=np.float32).astype(np.int32)] + + dense_xy_center_line_diff = dense_xy_center_line[1:] - dense_xy_center_line[:-1] + estimate_arc_len = np.sum(np.linalg.norm(dense_xy_center_line_diff, axis=1)) + + sample_pts_num = max(2, int(estimate_arc_len / eh)) + return sample_pts_num + + def detect_sast(self, tcl_map, tvo_map, tbo_map, tco_map, ratio_w, ratio_h, src_w, src_h, + shrink_ratio_of_width=0.3, tcl_map_thresh=0.5, offset_expand=1.0, out_strid=4.0): + """ + first resize the tcl_map, tvo_map and tbo_map to the input_size, then restore the polys + """ + # restore quad + scores, quads, xy_text = self.restore_quad(tcl_map, tcl_map_thresh, tvo_map) + dets = np.hstack((quads, scores)).astype(np.float32, copy=False) + dets = self.nms(dets) + if dets.shape[0] == 0: + return [] + quads = dets[:, :-1].reshape(-1, 4, 2) + + # Compute quad area + quad_areas = [] + for quad in quads: + quad_areas.append(-self.quad_area(quad)) + + # instance segmentation + # instance_count, instance_label_map = cv2.connectedComponents(tcl_map.astype(np.uint8), connectivity=8) + instance_count, instance_label_map = self.cluster_by_quads_tco(tcl_map, tcl_map_thresh, quads, tco_map) + + # restore single poly with tcl instance. 
+        poly_list = []
+        for instance_idx in range(1, instance_count):
+            xy_text = np.argwhere(instance_label_map == instance_idx)[:, ::-1]
+            quad = quads[instance_idx - 1]
+            q_area = quad_areas[instance_idx - 1]
+            if q_area < 5:
+                continue
+
+            # filter quads with a too-short side
+            len1 = float(np.linalg.norm(quad[0] - quad[1]))
+            len2 = float(np.linalg.norm(quad[1] - quad[2]))
+            min_len = min(len1, len2)
+            if min_len < 3:
+                continue
+
+            # filter small CC
+            if xy_text.shape[0] <= 0:
+                continue
+
+            # filter low-confidence instances
+            xy_text_scores = tcl_map[xy_text[:, 1], xy_text[:, 0], 0]
+            if np.sum(xy_text_scores) / quad_areas[instance_idx - 1] < 0.1:
+                # if np.sum(xy_text_scores) / quad_areas[instance_idx - 1] < 0.05:
+                continue
+
+            # sort xy_text
+            left_center_pt = np.array([[(quad[0, 0] + quad[-1, 0]) / 2.0,
+                                        (quad[0, 1] + quad[-1, 1]) / 2.0]])  # (1, 2)
+            right_center_pt = np.array([[(quad[1, 0] + quad[2, 0]) / 2.0,
+                                         (quad[1, 1] + quad[2, 1]) / 2.0]])  # (1, 2)
+            proj_unit_vec = (right_center_pt - left_center_pt) / \
+                            (np.linalg.norm(right_center_pt - left_center_pt) + 1e-6)
+            proj_value = np.sum(xy_text * proj_unit_vec, axis=1)
+            xy_text = xy_text[np.argsort(proj_value)]
+
+            # sample points on the tcl map
+            if self.sample_pts_num == 0:
+                sample_pts_num = self.estimate_sample_pts_num(quad, xy_text)
+            else:
+                sample_pts_num = self.sample_pts_num
+            xy_center_line = xy_text[np.linspace(0, xy_text.shape[0] - 1, sample_pts_num,
+                                                 endpoint=True, dtype=np.float32).astype(np.int32)]
+
+            point_pair_list = []
+            for x, y in xy_center_line:
+                # get the corresponding border offset
+                offset = tbo_map[y, x, :].reshape(2, 2)
+                if offset_expand != 1.0:
+                    offset_length = np.linalg.norm(offset, axis=1, keepdims=True)
+                    expand_length = np.clip(offset_length * (offset_expand - 1), a_min=0.5, a_max=3.0)
+                    offset_delta = offset / offset_length * expand_length
+                    offset = offset + offset_delta
+                # original point
+                ori_yx = np.array([y, x], dtype=np.float32)
+                point_pair = (ori_yx + offset)[:, ::-1] * out_stride / np.array([ratio_w, ratio_h]).reshape(-1, 2)
+                point_pair_list.append(point_pair)
+
+            # ndarray: (sample_pts_num * 2, 2); expand the poly along its width
+            detected_poly = self.point_pair2poly(point_pair_list)
+            detected_poly = self.expand_poly_along_width(detected_poly, shrink_ratio_of_width)
+            detected_poly[:, 0] = np.clip(detected_poly[:, 0], a_min=0, a_max=src_w)
+            detected_poly[:, 1] = np.clip(detected_poly[:, 1], a_min=0, a_max=src_h)
+            poly_list.append(detected_poly)
+
+        return poly_list
+
+    def __call__(self, outs_dict, shape_list):
+        score_list = outs_dict['f_score']
+        border_list = outs_dict['f_border']
+        tvo_list = outs_dict['f_tvo']
+        tco_list = outs_dict['f_tco']
+        if isinstance(score_list, torch.Tensor):
+            score_list = score_list.cpu().numpy()
+            border_list = border_list.cpu().numpy()
+            tvo_list = tvo_list.cpu().numpy()
+            tco_list = tco_list.cpu().numpy()
+
+        img_num = len(shape_list)
+        poly_lists = []
+        for ino in range(img_num):
+            p_score = score_list[ino].transpose((1, 2, 0))
+            p_border = border_list[ino].transpose((1, 2, 0))
+            p_tvo = tvo_list[ino].transpose((1, 2, 0))
+            p_tco = tco_list[ino].transpose((1, 2, 0))
+            src_h, src_w, ratio_h, ratio_w = shape_list[ino]
+
+            poly_list = self.detect_sast(p_score, p_tvo, p_border, p_tco, ratio_w, ratio_h, src_w, src_h,
+                                         shrink_ratio_of_width=self.shrink_ratio_of_width,
+                                         tcl_map_thresh=self.tcl_map_thresh, offset_expand=self.expand_scale)
+            poly_lists.append({'points': np.array(poly_list)})
+
+        return poly_lists
+
diff --git a/modules/pytorchocr/pytorchocr_utility.py b/modules/pytorchocr/pytorchocr_utility.py
new file mode 100644
index 0000000..8a1fc31
--- /dev/null
+++ b/modules/pytorchocr/pytorchocr_utility.py
@@ -0,0 +1,555 @@
+import os, sys
+import math
+import numpy as np
+import cv2
+from PIL import Image, ImageDraw, ImageFont
+import argparse
+
+def init_args():
+    def str2bool(v):
+        return v.lower() in ("true", "t", "1")
+
+    parser = argparse.ArgumentParser()
+    # params for prediction engine
+    parser.add_argument("--use_gpu", type=str2bool, default=True)
+    # parser.add_argument("--ir_optim", type=str2bool, default=True)
+    # parser.add_argument("--use_tensorrt", type=str2bool, default=False)
+    # parser.add_argument("--use_fp16", type=str2bool, default=False)
+    parser.add_argument("--gpu_mem", type=int, default=500)
+    parser.add_argument("--warmup", type=str2bool, default=False)
+
+    # params for text detector
+    parser.add_argument("--image_dir", type=str)
+    parser.add_argument("--det_algorithm", type=str, default='DB')
+    parser.add_argument("--det_model_path", type=str)
+    parser.add_argument("--det_limit_side_len", type=float, default=960)
+    parser.add_argument("--det_limit_type", type=str, default='max')
+
+    # DB params
+    parser.add_argument("--det_db_thresh", type=float, default=0.3)
+    parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
+    parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
+    parser.add_argument("--max_batch_size", type=int, default=10)
+    parser.add_argument("--use_dilation", type=str2bool, default=False)
+    parser.add_argument("--det_db_score_mode", type=str, default="fast")
+
+    # EAST params
+    parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
+    parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
+    parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
+
+    # SAST params
+    parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
+    parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
+    parser.add_argument("--det_sast_polygon", type=str2bool, default=False)
+
+    # PSE params
+    parser.add_argument("--det_pse_thresh", type=float, default=0)
+    parser.add_argument("--det_pse_box_thresh", type=float, default=0.85)
+    parser.add_argument("--det_pse_min_area", type=float, default=16)
+    parser.add_argument("--det_pse_box_type", type=str, default='box')
+    parser.add_argument("--det_pse_scale", type=int, default=1)
+
+    # FCE params
+    parser.add_argument("--scales", type=list, default=[8, 16, 32])
+    parser.add_argument("--alpha", type=float, default=1.0)
+    parser.add_argument("--beta", type=float, default=1.0)
+    parser.add_argument("--fourier_degree", type=int, default=5)
+    parser.add_argument("--det_fce_box_type", type=str, default='poly')
+
+    # params for text recognizer
+    parser.add_argument("--rec_algorithm", type=str, default='CRNN')
+    parser.add_argument("--rec_model_path", type=str)
+    parser.add_argument("--rec_image_inverse", type=str2bool, default=True)
+    parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
+    parser.add_argument("--rec_char_type", type=str, default='ch')
+    parser.add_argument("--rec_batch_num", type=int, default=6)
+    parser.add_argument("--max_text_length", type=int, default=25)
+
+    parser.add_argument("--use_space_char", type=str2bool, default=True)
+    parser.add_argument("--drop_score", type=float, default=0.5)
+    parser.add_argument("--limited_max_width", type=int, default=1280)
+    parser.add_argument("--limited_min_width", type=int, default=16)
+
+    parser.add_argument(
+        "--vis_font_path", type=str,
+        default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'doc/fonts/simfang.ttf'))
+    parser.add_argument(
+        "--rec_char_dict_path",
+        type=str,
+        default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
+                             'pytorchocr/utils/ppocr_keys_v1.txt'))
+
+    # params for text classifier
+    parser.add_argument("--use_angle_cls", type=str2bool, default=False)
+    parser.add_argument("--cls_model_path", type=str)
+    parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
+    parser.add_argument("--label_list", type=list, default=['0', '180'])
+    parser.add_argument("--cls_batch_num", type=int, default=6)
+    parser.add_argument("--cls_thresh", type=float, default=0.9)
+
+    parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
+    parser.add_argument("--use_pdserving", type=str2bool, default=False)
+
+    # params for e2e
+    parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
+    parser.add_argument("--e2e_model_path", type=str)
+    parser.add_argument("--e2e_limit_side_len", type=float, default=768)
+    parser.add_argument("--e2e_limit_type", type=str, default='max')
+
+    # PGNet params
+    parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
+    parser.add_argument(
+        "--e2e_char_dict_path", type=str,
+        default=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
+                             'pytorchocr/utils/ic15_dict.txt'))
+    parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
+    parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
+    parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
+
+    # SR params
+    parser.add_argument("--sr_model_path", type=str)
+    parser.add_argument("--sr_image_shape", type=str, default="3, 32, 128")
+    parser.add_argument("--sr_batch_num", type=int, default=1)
+
+    # params .yaml
+    parser.add_argument("--det_yaml_path", type=str, default=None)
+    parser.add_argument("--rec_yaml_path", type=str, default=None)
+    parser.add_argument("--cls_yaml_path", type=str, default=None)
+    parser.add_argument("--e2e_yaml_path", type=str, default=None)
+    parser.add_argument("--sr_yaml_path", type=str, default=None)
+
+    # multi-process
+    parser.add_argument("--use_mp", type=str2bool, default=False)
+    parser.add_argument("--total_process_num", type=int, default=1)
+    parser.add_argument("--process_id", type=int, default=0)
+
+    parser.add_argument("--benchmark", type=str2bool, default=False)
+    parser.add_argument("--save_log_path", type=str, default="./log_output/")
+
+    parser.add_argument("--show_log", type=str2bool, default=True)
+
+    return parser
+
+def parse_args():
+    parser = init_args()
+    return parser.parse_args()
+
+def get_default_config(args):
+    return vars(args)
+
+
+def read_network_config_from_yaml(yaml_path, char_num=None):
+    if not os.path.exists(yaml_path):
+        raise FileNotFoundError('{} does not exist.'.format(yaml_path))
+    import yaml
+    with open(yaml_path, encoding='utf-8') as f:
+        res = yaml.safe_load(f)
+    if res.get('Architecture') is None:
+        raise ValueError('{} has no Architecture'.format(yaml_path))
+    if res['Architecture']['Head']['name'] == 'MultiHead' and char_num is not None:
+        res['Architecture']['Head']['out_channels_list'] = {
+            'CTCLabelDecode': char_num,
+            'SARLabelDecode': char_num + 2,
+            'NRTRLabelDecode': char_num + 3
+        }
+    return res['Architecture']
+
+def AnalysisConfig(weights_path, yaml_path=None, char_num=None):
+    if not os.path.exists(os.path.abspath(weights_path)):
+        raise
FileNotFoundError('{} is not found.'.format(weights_path)) + + if yaml_path is not None: + return read_network_config_from_yaml(yaml_path, char_num=char_num) + + weights_basename = os.path.basename(weights_path) + weights_name = weights_basename.lower() + + # supported_weights = ['ch_ptocr_server_v2.0_det_infer.pth', + # 'ch_ptocr_server_v2.0_rec_infer.pth', + # 'ch_ptocr_mobile_v2.0_det_infer.pth', + # 'ch_ptocr_mobile_v2.0_rec_infer.pth', + # 'ch_ptocr_mobile_v2.0_cls_infer.pth', + # ] + # assert weights_name in supported_weights, \ + # "supported weights are {} but input weights is {}".format(supported_weights, weights_name) + + if weights_name == 'ch_ptocr_server_v2.0_det_infer.pth': + network_config = {'model_type':'det', + 'algorithm':'DB', + 'Transform':None, + 'Backbone':{'name':'ResNet_vd', 'layers':18, 'disable_se':True}, + 'Neck':{'name':'DBFPN', 'out_channels':256}, + 'Head':{'name':'DBHead', 'k':50}} + + elif weights_name == 'ch_ptocr_server_v2.0_rec_infer.pth': + network_config = {'model_type':'rec', + 'algorithm':'CRNN', + 'Transform':None, + 'Backbone':{'name':'ResNet', 'layers':34}, + 'Neck':{'name':'SequenceEncoder', 'hidden_size':256, 'encoder_type':'rnn'}, + 'Head':{'name':'CTCHead', 'fc_decay': 4e-05}} + + elif weights_name in ['ch_ptocr_mobile_v2.0_det_infer.pth']: + network_config = {'model_type': 'det', + 'algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': True}, + 'Neck': {'name': 'DBFPN', 'out_channels': 96}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name =='ch_ptocr_mobile_v2.0_rec_infer.pth': + network_config = {'model_type':'rec', + 'algorithm':'CRNN', + 'Transform':None, + 'Backbone':{'model_name':'small', 'name':'MobileNetV3', 'scale':0.5, 'small_stride':[1,2,2,2]}, + 'Neck':{'name':'SequenceEncoder', 'hidden_size':48, 'encoder_type':'rnn'}, + 'Head':{'name':'CTCHead', 'fc_decay': 4e-05}} + + elif weights_name == 'ch_ptocr_mobile_v2.0_cls_infer.pth': + network_config = {'model_type':'cls', + 'algorithm':'CLS', + 'Transform':None, + 'Backbone':{'name':'MobileNetV3', 'model_name':'small', 'scale':0.35}, + 'Neck':None, + 'Head':{'name':'ClsHead', 'class_dim':2}} + + elif weights_name == 'ch_ptocr_v2_rec_infer.pth': + network_config = {'model_type': 'rec', + 'algorithm': 'CRNN', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV1Enhance', 'scale': 0.5}, + 'Neck': {'name': 'SequenceEncoder', 'hidden_size': 64, 'encoder_type': 'rnn'}, + 'Head': {'name': 'CTCHead', 'mid_channels': 96, 'fc_decay': 2e-05}} + + elif weights_name == 'ch_ptocr_v2_det_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': True}, + 'Neck': {'name': 'DBFPN', 'out_channels': 96}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name == 'ch_ptocr_v3_rec_infer.pth': + network_config = {'model_type':'rec', + 'algorithm':'CRNN', + 'Transform':None, + 'Backbone':{'name':'MobileNetV1Enhance', + 'scale':0.5, + 'last_conv_stride': [1, 2], + 'last_pool_type': 'avg'}, + 'Neck':{'name':'SequenceEncoder', + 'dims': 64, + 'depth': 2, + 'hidden_dims': 120, + 'use_guide': True, + 'encoder_type':'svtr'}, + 'Head':{'name':'CTCHead', 'fc_decay': 2e-05} + } + + elif weights_name == 'ch_ptocr_v3_det_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': 
True}, + 'Neck': {'name': 'RSEFPN', 'out_channels': 96, 'shortcut': True}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name == 'det_mv3_db_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'}, + 'Neck': {'name': 'DBFPN', 'out_channels': 256}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name == 'det_r50_vd_db_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'ResNet_vd', 'layers': 50}, + 'Neck': {'name': 'DBFPN', 'out_channels': 256}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name == 'det_mv3_east_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'EAST', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large'}, + 'Neck': {'name': 'EASTFPN', 'model_name': 'small'}, + 'Head': {'name': 'EASTHead', 'model_name': 'small'}} + + elif weights_name == 'det_r50_vd_east_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'EAST', + 'Transform': None, + 'Backbone': {'name': 'ResNet_vd', 'layers': 50}, + 'Neck': {'name': 'EASTFPN', 'model_name': 'large'}, + 'Head': {'name': 'EASTHead', 'model_name': 'large'}} + + elif weights_name == 'det_r50_vd_sast_icdar15_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'SAST', + 'Transform': None, + 'Backbone': {'name': 'ResNet_SAST', 'layers': 50}, + 'Neck': {'name': 'SASTFPN', 'with_cab': True}, + 'Head': {'name': 'SASTHead'}} + + elif weights_name == 'det_r50_vd_sast_totaltext_v2.0_infer.pth': + network_config = {'model_type': 'det', + 'algorithm': 'SAST', + 'Transform': None, + 'Backbone': {'name': 'ResNet_SAST', 'layers': 50}, + 'Neck': {'name': 'SASTFPN', 'with_cab': True}, + 'Head': {'name': 'SASTHead'}} + + elif weights_name == 'en_server_pgneta_infer.pth': + network_config = {'model_type': 'e2e', + 'algorithm': 'PGNet', + 'Transform': None, + 'Backbone': {'name': 'ResNet', 'layers': 50}, + 'Neck': {'name': 'PGFPN'}, + 'Head': {'name': 'PGHead'}} + + elif weights_name == 'en_ptocr_mobile_v2.0_table_det_infer.pth': + network_config = {'model_type': 'det','algorithm': 'DB', + 'Transform': None, + 'Backbone': {'name': 'MobileNetV3', 'model_name': 'large', 'scale': 0.5, 'disable_se': False}, + 'Neck': {'name': 'DBFPN', 'out_channels': 96}, + 'Head': {'name': 'DBHead', 'k': 50}} + + elif weights_name == 'en_ptocr_mobile_v2.0_table_rec_infer.pth': + network_config = {'model_type': 'rec', + 'algorithm': 'CRNN', + 'Transform': None, + 'Backbone': {'model_name': 'large', 'name': 'MobileNetV3', }, + 'Neck': {'name': 'SequenceEncoder', 'hidden_size': 96, 'encoder_type': 'rnn'}, + 'Head': {'name': 'CTCHead', 'fc_decay': 4e-05}} + + elif 'om_' in weights_name and '_rec_' in weights_name: + network_config = {'model_type': 'rec', + 'algorithm': 'CRNN', + 'Transform': None, + 'Backbone': {'model_name': 'small', 'name': 'MobileNetV3', 'scale': 0.5, + 'small_stride': [1, 2, 2, 2]}, + 'Neck': {'name': 'SequenceEncoder', 'hidden_size': 48, 'encoder_type': 'om'}, + 'Head': {'name': 'CTCHead', 'fc_decay': 4e-05}} + + else: + network_config = {'model_type': 'rec', + 'algorithm': 'CRNN', + 'Transform': None, + 'Backbone': {'model_name': 'small', 'name': 'MobileNetV3', 'scale': 0.5, + 'small_stride': [1, 2, 2, 2]}, + 'Neck': {'name': 'SequenceEncoder', 'hidden_size': 48, 'encoder_type': 'rnn'}, + 'Head': {'name': 'CTCHead', 'fc_decay': 4e-05}} + # raise NotImplementedError + + 
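+    # Unrecognized weight names fall back to the mobile CRNN recognizer
+    # config above instead of raising, so a mistyped detector or classifier
+    # checkpoint only fails later, when the weights are loaded.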
+    return network_config
+
+
+def draw_e2e_res(dt_boxes, strs, img_path):
+    src_im = cv2.imread(img_path)
+    for box, txt in zip(dt_boxes, strs):
+        box = box.astype(np.int32).reshape((-1, 1, 2))
+        cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
+        cv2.putText(
+            src_im,
+            txt,
+            org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
+            fontFace=cv2.FONT_HERSHEY_COMPLEX,
+            fontScale=0.7,
+            color=(0, 255, 0),
+            thickness=1)
+    return src_im
+
+
+def draw_text_det_res(dt_boxes, img_path):
+    src_im = cv2.imread(img_path)
+    for box in dt_boxes:
+        box = np.array(box).astype(np.int32).reshape(-1, 2)
+        cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
+    return src_im
+
+
+def resize_img(img, input_size=600):
+    """
+    Resize img and limit the longest side of the image to input_size.
+    """
+    img = np.array(img)
+    im_shape = img.shape
+    im_size_max = np.max(im_shape[0:2])
+    im_scale = float(input_size) / float(im_size_max)
+    img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
+    return img
+
+
+def draw_ocr_box_txt(image,
+                     boxes,
+                     txts,
+                     scores=None,
+                     drop_score=0.5,
+                     font_path="./doc/simfang.ttf"):
+    h, w = image.height, image.width
+    img_left = image.copy()
+    img_right = Image.new('RGB', (w, h), (255, 255, 255))
+
+    import random
+
+    random.seed(0)
+    draw_left = ImageDraw.Draw(img_left)
+    draw_right = ImageDraw.Draw(img_right)
+    for idx, (box, txt) in enumerate(zip(boxes, txts)):
+        if scores is not None and scores[idx] < drop_score:
+            continue
+        color = (random.randint(0, 255), random.randint(0, 255),
+                 random.randint(0, 255))
+        draw_left.polygon(box, fill=color)
+        draw_right.polygon(
+            [
+                box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
+                box[2][1], box[3][0], box[3][1]
+            ],
+            outline=color)
+        box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2)
+        box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2)
+        if box_height > 2 * box_width:
+            font_size = max(int(box_width * 0.9), 10)
+            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+            cur_y = box[0][1]
+            for c in txt:
+                char_size = font.getsize(c)
+                draw_right.text(
+                    (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
+                cur_y += char_size[1]
+        else:
+            font_size = max(int(box_height * 0.8), 10)
+            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+            draw_right.text(
+                [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
+    img_left = Image.blend(image, img_left, 0.5)
+    img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
+    img_show.paste(img_left, (0, 0, w, h))
+    img_show.paste(img_right, (w, 0, w * 2, h))
+    return np.array(img_show)
+
+
+def str_count(s):
+    """
+    Count the visual width of a string: each Chinese character counts as
+    one, while a single English letter, digit or space counts as half a
+    Chinese character.
+    args:
+        s(string): the input string
+    return(int):
+        the visual width of the string
+    """
+    import string
+    count_zh = count_pu = 0
+    s_len = len(s)
+    en_dg_count = 0
+    for c in s:
+        if c in string.ascii_letters or c.isdigit() or c.isspace():
+            en_dg_count += 1
+        elif c.isalpha():
+            count_zh += 1
+        else:
+            count_pu += 1
+    return s_len - math.ceil(en_dg_count / 2)
+
+
+def text_visual(texts,
+                scores,
+                img_h=400,
+                img_w=600,
+                threshold=0.,
+                font_path="./doc/simfang.ttf"):
+    """
+    Create a new blank image and draw the texts on it.
+    args:
+        texts(list): the texts to be drawn
+        scores(list|None): corresponding score of each text
+        img_h(int): the height of the blank image
+        img_w(int): the width of the blank image
+        font_path: the path of the font used to draw the texts
+    return(array):
+    """
+    if scores is not None:
+        assert len(texts) == len(
+            scores), "The number of txts and corresponding scores must match"
+
+    def create_blank_img():
+        blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255
+        blank_img[:, img_w - 1:] = 0
+        blank_img = Image.fromarray(blank_img).convert("RGB")
+        draw_txt = ImageDraw.Draw(blank_img)
+        return blank_img, draw_txt
+
+    blank_img, draw_txt = create_blank_img()
+
+    font_size = 20
+    txt_color = (0, 0, 0)
+    font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+
+    gap = font_size + 5
+    txt_img_list = []
+    count, index = 1, 0
+    for idx, txt in enumerate(texts):
+        index += 1
+        if scores[idx] < threshold or math.isnan(scores[idx]):
+            index -= 1
+            continue
+        first_line = True
+        while str_count(txt) >= img_w // font_size - 4:
+            tmp = txt
+            txt = tmp[:img_w // font_size - 4]
+            if first_line:
+                new_txt = str(index) + ': ' + txt
+                first_line = False
+            else:
+                new_txt = '    ' + txt
+            draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
+            txt = tmp[img_w // font_size - 4:]
+            if count >= img_h // gap - 1:
+                txt_img_list.append(np.array(blank_img))
+                blank_img, draw_txt = create_blank_img()
+                count = 0
+            count += 1
+        if first_line:
+            new_txt = str(index) + ': ' + txt + '    ' + '%.3f' % (scores[idx])
+        else:
+            new_txt = "    " + txt + "    " + '%.3f' % (scores[idx])
+        draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
+        # whether to start a new blank img or not
+        if count >= img_h // gap - 1 and idx + 1 < len(texts):
+            txt_img_list.append(np.array(blank_img))
+            blank_img, draw_txt = create_blank_img()
+            count = 0
+        count += 1
+    txt_img_list.append(np.array(blank_img))
+    if len(txt_img_list) == 1:
+        blank_img = np.array(txt_img_list[0])
+    else:
+        blank_img = np.concatenate(txt_img_list, axis=1)
+    return np.array(blank_img)
+
+
+def base64_to_cv2(b64str):
+    import base64
+    data = base64.b64decode(b64str.encode('utf8'))
+    data = np.frombuffer(data, np.uint8)
+    data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+    return data
+
+
+def draw_boxes(image, boxes, scores=None, drop_score=0.5):
+    if scores is None:
+        scores = [1] * len(boxes)
+    for (box, score) in zip(boxes, scores):
+        if score < drop_score:
+            continue
+        box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
+        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
+    return image
\ No newline at end of file
diff --git a/modules/pytorchocr/utils/EN_symbol_dict.txt b/modules/pytorchocr/utils/EN_symbol_dict.txt
new file mode 100644
index 0000000..1aef43d
--- /dev/null
+++ b/modules/pytorchocr/utils/EN_symbol_dict.txt
@@ -0,0 +1,94 @@
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+a
+b
+c
+d
+e
+f
+g
+h
+i
+j
+k
+l
+m
+n
+o
+p
+q
+r
+s
+t
+u
+v
+w
+x
+y
+z
+A
+B
+C
+D
+E
+F
+G
+H
+I
+J
+K
+L
+M
+N
+O
+P
+Q
+R
+S
+T
+U
+V +W +X +Y +Z +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +: +; +< += +> +? +@ +[ +\ +] +^ +_ +` +{ +| +} +~ \ No newline at end of file diff --git a/modules/pytorchocr/utils/__init__.py b/modules/pytorchocr/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/modules/pytorchocr/utils/dict/ar_dict.txt b/modules/pytorchocr/utils/dict/ar_dict.txt new file mode 100644 index 0000000..fc63802 --- /dev/null +++ b/modules/pytorchocr/utils/dict/ar_dict.txt @@ -0,0 +1,117 @@ +a +r +b +i +c +_ +m +g +/ +1 +0 +I +L +S +V +R +C +2 +v +l +6 +3 +9 +. +j +p +ا +ل +م +ر +ج +و +ح +ي +ة +5 +8 +7 +أ +ب +ض +4 +ك +س +ه +ث +ن +ط +ع +ت +غ +خ +ف +ئ +ز +إ +د +ص +ظ +ذ +ش +ى +ق +ؤ +آ +ء +s +e +n +w +t +u +z +d +A +N +G +h +o +E +T +H +O +B +y +F +U +J +X +W +P +Z +M +k +q +Y +Q +D +f +K +x +' +% +- +# +@ +! +& +$ +, +: +é +? ++ +É +( + diff --git a/modules/pytorchocr/utils/dict/arabic_dict.txt b/modules/pytorchocr/utils/dict/arabic_dict.txt new file mode 100644 index 0000000..e97abf3 --- /dev/null +++ b/modules/pytorchocr/utils/dict/arabic_dict.txt @@ -0,0 +1,162 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +ء +آ +أ +ؤ +إ +ئ +ا +ب +ة +ت +ث +ج +ح +خ +د +ذ +ر +ز +س +ش +ص +ض +ط +ظ +ع +غ +ف +ق +ك +ل +م +ن +ه +و +ى +ي +ً +ٌ +ٍ +َ +ُ +ِ +ّ +ْ +ٓ +ٔ +ٰ +ٱ +ٹ +پ +چ +ڈ +ڑ +ژ +ک +ڭ +گ +ں +ھ +ۀ +ہ +ۂ +ۃ +ۆ +ۇ +ۈ +ۋ +ی +ې +ے +ۓ +ە +١ +٢ +٣ +٤ +٥ +٦ +٧ +٨ +٩ diff --git a/modules/pytorchocr/utils/dict/be_dict.txt b/modules/pytorchocr/utils/dict/be_dict.txt new file mode 100644 index 0000000..f8458ba --- /dev/null +++ b/modules/pytorchocr/utils/dict/be_dict.txt @@ -0,0 +1,145 @@ +b +e +_ +i +m +g +/ +2 +0 +I +L +S +V +R +C +1 +v +a +l +6 +9 +4 +3 +. +j +p +п +а +з +б +у +г +н +ц +ь +8 +м +л +і +о +ў +ы +7 +5 +М +х +с +р +ф +я +е +д +ж +ю +ч +й +к +Д +в +Б +т +І +ш +ё +э +К +Л +Н +А +Ж +Г +В +П +З +Е +О +Р +С +У +Ё +Й +Т +Ч +Э +Ц +Ю +Ш +Ф +Х +Я +Ь +Ы +Ў +s +c +n +w +M +o +t +T +E +A +B +u +h +y +k +r +H +d +Y +O +U +F +f +x +D +G +N +K +P +z +J +X +W +Z +Q +% +- +q +@ +' +! +# +& +, +: +$ +( +? +é ++ +É + diff --git a/modules/pytorchocr/utils/dict/bg_dict.txt b/modules/pytorchocr/utils/dict/bg_dict.txt new file mode 100644 index 0000000..84713c3 --- /dev/null +++ b/modules/pytorchocr/utils/dict/bg_dict.txt @@ -0,0 +1,140 @@ +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +А +Б +В +Г +Д +Е +Ж +З +И +Й +К +Л +М +Н +О +П +Р +С +Т +У +Ф +Х +Ц +Ч +Ш +Щ +Ъ +Ю +Я +а +б +в +г +д +е +ж +з +и +й +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ь +ю +я + diff --git a/modules/pytorchocr/utils/dict/ch_tra_dict.txt b/modules/pytorchocr/utils/dict/ch_tra_dict.txt new file mode 100644 index 0000000..cc1aa47 --- /dev/null +++ b/modules/pytorchocr/utils/dict/ch_tra_dict.txt @@ -0,0 +1,8421 @@ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? 
+@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +¥ +® +° +± +² +´ +· +» +É +Ë +Ó +× +Ü +à +á +ä +è +é +ì +í +ò +ó +÷ +ú +ü +ā +ē +ī +ō +ū +ǐ +ǒ +ɔ +ɡ +ʌ +ˋ +Λ +Ο +Φ +Ω +α +β +ε +θ +μ +π +З +И +Й +П +Я +г +— +‖ +‘ +’ +“ +” +• +… +‧ +′ +″ +※ +℃ +№ +™ +Ⅱ +Ⅲ +Ⅳ +← +↑ +→ +↓ +⇋ +∈ +∑ +√ +∞ +∣ +∧ +∩ +∫ +∶ +≈ +≠ +≤ +≥ +⊙ +⊥ +① +② +③ +④ +⑧ +⑴ +⑵ +⑶ +─ +│ +┅ +┌ +├ +█ +▎ +▏ +▕ +■ +□ +▪ +▲ +△ +▼ +◆ +◇ +○ +◎ +● +◥ +★ +☆ +❋ +❤ +  +、 +。 +〇 +〉 +《 +》 +「 +」 +『 +』 +【 +】 +〔 +〕 +〖 +〗 +の +サ +シ +ジ +マ +ㄱ +ㆍ +㎏ +㎡ +㐂 +㐱 +㙟 +㴪 +㸃 +䖝 +䝉 +䰾 +䲁 +一 +丁 +七 +丄 +丈 +三 +上 +下 +丌 +不 +与 +丏 +丐 +丑 +且 +丕 +世 +丘 +丙 +丞 +丟 +両 +並 +丨 +丫 +中 +丰 +串 +丶 +丸 +丹 +主 +丼 +丿 +乂 +乃 +久 +么 +之 +乍 +乎 +乏 +乒 +乓 +乖 +乗 +乘 +乙 +乚 +乜 +九 +乞 +也 +乩 +乭 +乳 +乸 +乹 +乾 +亀 +亂 +亅 +了 +予 +亊 +事 +二 +亍 +云 +互 +亓 +五 +井 +亘 +些 +亜 +亞 +亟 +亠 +亡 +亢 +交 +亥 +亦 +亨 +享 +京 +亭 +亮 +亰 +亳 +亶 +亹 +人 +亻 +什 +仁 +仂 +仃 +仄 +仇 +仉 +今 +介 +仍 +仏 +仔 +仕 +他 +仗 +付 +仙 +仛 +仝 +仞 +仟 +仡 +代 +令 +以 +仨 +仫 +仮 +仰 +仲 +仳 +仵 +件 +仺 +任 +仼 +份 +仿 +企 +伃 +伈 +伉 +伊 +伋 +伍 +伎 +伏 +伐 +休 +伕 +伙 +伝 +伢 +伯 +估 +伱 +伴 +伶 +伷 +伸 +伺 +似 +伽 +伾 +佀 +佁 +佃 +但 +佇 +佈 +佉 +佋 +位 +低 +住 +佐 +佑 +体 +佔 +何 +佗 +佘 +余 +佚 +佛 +作 +佝 +佞 +佟 +你 +佣 +佤 +佧 +佩 +佬 +佯 +佰 +佳 +併 +佶 +佹 +佺 +佼 +佾 +使 +侁 +侃 +侄 +侅 +來 +侈 +侊 +例 +侍 +侏 +侑 +侖 +侗 +侘 +侚 +供 +依 +侞 +価 +侮 +侯 +侵 +侶 +侷 +侹 +便 +俁 +係 +促 +俄 +俅 +俊 +俋 +俌 +俍 +俎 +俏 +俐 +俑 +俗 +俘 +俚 +俛 +保 +俞 +俟 +俠 +信 +俬 +修 +俯 +俱 +俳 +俴 +俵 +俶 +俸 +俺 +俽 +俾 +倆 +倈 +倉 +個 +倌 +倍 +們 +倒 +倓 +倔 +倖 +倗 +倘 +候 +倚 +倜 +倞 +借 +倡 +倢 +倣 +値 +倦 +倧 +倩 +倪 +倫 +倬 +倭 +倮 +倻 +值 +偁 +偃 +假 +偈 +偉 +偊 +偌 +偍 +偎 +偏 +偓 +偕 +做 +停 +健 +偪 +偲 +側 +偵 +偶 +偷 +偸 +偽 +傀 +傃 +傅 +傈 +傉 +傍 +傑 +傒 +傕 +傖 +傘 +備 +傜 +傢 +傣 +催 +傭 +傲 +傳 +債 +傷 +傻 +傾 +僅 +僉 +僊 +働 +像 +僑 +僔 +僕 +僖 +僙 +僚 +僜 +僡 +僧 +僩 +僭 +僮 +僰 +僱 +僳 +僴 +僵 +價 +僻 +儀 +儁 +儂 +億 +儆 +儇 +儈 +儉 +儋 +儐 +儒 +儔 +儕 +儘 +儚 +儞 +償 +儡 +儥 +儦 +優 +儫 +儱 +儲 +儷 +儺 +儻 +儼 +兀 +允 +元 +兄 +充 +兆 +先 +光 +克 +兌 +免 +児 +兒 +兔 +兕 +兗 +兜 +入 +內 +全 +兩 +兪 +八 +公 +六 +兮 +共 +兵 +其 +具 +典 +兼 +兿 +冀 +冂 +円 +冇 +冉 +冊 +再 +冏 +冑 +冒 +冕 +冖 +冗 +冚 +冠 +冢 +冤 +冥 +冧 +冨 +冪 +冫 +冬 +冮 +冰 +冴 +冶 +冷 +冼 +冽 +凃 +凄 +准 +凈 +凋 +凌 +凍 +凖 +凜 +凝 +凞 +几 +凡 +処 +凪 +凬 +凰 +凱 +凳 +凵 +凶 +凸 +凹 +出 +函 +刀 +刁 +刂 +刃 +刄 +分 +切 +刈 +刊 +刎 +刑 +划 +列 +初 +判 +別 +刦 +刧 +刨 +利 +刪 +刮 +到 +制 +刷 +券 +刺 +刻 +刼 +剁 +剃 +則 +削 +剋 +剌 +前 +剎 +剏 +剔 +剖 +剛 +剝 +剡 +剣 +剩 +剪 +剮 +副 +割 +創 +剿 +劃 +劄 +劇 +劈 +劉 +劊 +劌 +劍 +劑 +劔 +力 +功 +加 +劣 +助 +努 +劫 +劬 +劭 +劵 +効 +劼 +劾 +勁 +勃 +勅 +勇 +勉 +勐 +勑 +勒 +勔 +動 +勖 +勗 +勘 +務 +勛 +勝 +勞 +募 +勢 +勣 +勤 +勦 +勰 +勱 +勲 +勳 +勵 +勷 +勸 +勺 +勻 +勾 +勿 +匂 +匄 +包 +匆 +匈 +匋 +匍 +匏 +匐 +匕 +化 +北 +匙 +匚 +匝 +匠 +匡 +匣 +匪 +匯 +匱 +匸 +匹 +匾 +匿 +區 +十 +千 +卅 +升 +午 +卉 +半 +卋 +卍 +卐 +卑 +卒 +卓 +協 +南 +博 +卜 +卞 +卟 +占 +卡 +卣 +卦 +卧 +卩 +卬 +卮 +卯 +印 +危 +卲 +即 +卵 +卷 +卸 +卹 +卺 +卻 +卽 +卿 +厄 +厓 +厔 +厙 +厚 +厝 +原 +厥 +厭 +厰 +厲 +厴 +厶 +去 +參 +叄 +又 +叉 +及 +友 +反 +収 +叔 +叕 +取 +受 +叛 +叟 +叡 +叢 +口 +古 +句 +另 +叨 +叩 +只 +叫 +召 +叭 +叮 +可 +台 +叱 +史 +右 +叵 +司 +叻 +叼 +吁 +吃 +各 +吆 +合 +吉 +吊 +吋 +同 +名 +后 +吏 +吐 +向 +吒 +吔 +吖 +君 +吝 +吞 +吟 +吠 +吡 +吥 +否 +吧 +吩 +含 +吮 +吱 +吲 +吳 +吵 +吶 +吸 +吹 +吻 +吼 +吾 +呀 +呂 +呃 +呈 +呉 +告 +呋 +呎 +呢 +呤 +呦 +周 +呱 +味 +呵 +呷 +呸 +呼 +命 +呾 +咀 +咁 +咂 +咄 +咅 +咆 +咋 +和 +咎 +咑 +咒 +咔 +咕 +咖 +咗 +咘 +咚 +咟 +咤 +咥 +咧 +咨 +咩 +咪 +咫 +咬 +咭 +咯 +咱 +咲 +咳 +咸 +咻 +咼 +咽 +咾 +咿 +哀 +品 +哂 +哄 +哆 +哇 +哈 +哉 +哌 +哎 +哏 +哐 +哖 +哚 +哞 +員 +哥 +哦 +哨 +哩 +哪 +哭 +哮 +哱 +哲 +哺 +哼 +唃 +唄 +唆 +唇 +唉 +唏 +唐 +唑 +唔 +唘 +唧 +唫 +唬 +唭 +售 +唯 +唱 +唳 +唵 +唷 +唸 +唻 +唾 +啁 +啃 +啄 +商 +啉 +啊 +啍 +問 +啓 +啖 +啚 +啜 +啞 +啟 +啡 +啣 +啤 +啥 +啦 +啪 +啫 +啯 +啰 +啱 +啲 +啵 +啶 +啷 +啻 +啼 +啾 +喀 +喂 +喃 +善 +喆 +喇 +喈 +喉 +喊 +喋 +喏 +喔 +喘 +喙 +喚 +喜 +喝 +喢 +喦 +喧 +喪 +喫 +喬 +單 +喰 +喱 +喲 +喳 +喵 +喹 +喻 +喼 +嗄 +嗅 +嗆 +嗇 +嗊 +嗎 +嗑 +嗒 +嗓 +嗔 +嗖 +嗚 +嗜 +嗝 +嗞 +嗡 +嗢 +嗣 +嗦 +嗨 +嗩 +嗪 +嗮 +嗯 +嗲 +嗶 +嗹 +嗽 +嘀 +嘅 +嘆 +嘉 +嘌 +嘍 +嘎 +嘏 +嘔 +嘗 +嘚 +嘛 +嘜 +嘞 +嘟 +嘢 +嘣 +嘥 +嘧 +嘩 +嘬 +嘮 +嘯 +嘰 +嘲 +嘴 
+嘶 +嘸 +嘹 +嘻 +嘿 +噁 +噌 +噍 +噏 +噓 +噗 +噝 +噠 +噢 +噤 +噥 +噦 +器 +噩 +噪 +噬 +噯 +噰 +噲 +噴 +噶 +噸 +噹 +噻 +嚇 +嚈 +嚎 +嚏 +嚐 +嚒 +嚓 +嚕 +嚗 +嚙 +嚞 +嚟 +嚤 +嚦 +嚧 +嚨 +嚩 +嚮 +嚳 +嚴 +嚶 +嚷 +嚼 +嚿 +囀 +囂 +囃 +囉 +囊 +囍 +囑 +囒 +囓 +囗 +囚 +四 +囝 +回 +因 +囡 +団 +囤 +囧 +囪 +囮 +囯 +困 +囲 +図 +囶 +囷 +囹 +固 +囿 +圂 +圃 +圄 +圈 +圉 +國 +圍 +圏 +園 +圓 +圖 +圗 +團 +圜 +土 +圧 +在 +圩 +圪 +圭 +圯 +地 +圳 +圻 +圾 +址 +均 +坊 +坋 +坌 +坍 +坎 +坐 +坑 +坖 +坡 +坣 +坤 +坦 +坨 +坩 +坪 +坫 +坬 +坭 +坮 +坯 +坳 +坵 +坶 +坷 +坻 +垂 +垃 +垈 +型 +垍 +垓 +垕 +垚 +垛 +垞 +垟 +垠 +垢 +垣 +垮 +垯 +垰 +垵 +垸 +垻 +垿 +埃 +埅 +埇 +埈 +埋 +埌 +城 +埏 +埒 +埔 +埕 +埗 +埜 +域 +埠 +埡 +埤 +埧 +埨 +埪 +埭 +埮 +埴 +埵 +執 +培 +基 +埻 +埼 +堀 +堂 +堃 +堅 +堆 +堇 +堈 +堉 +堊 +堍 +堖 +堝 +堡 +堤 +堦 +堪 +堮 +堯 +堰 +報 +場 +堵 +堷 +堺 +塀 +塅 +塆 +塊 +塋 +塌 +塍 +塏 +塑 +塔 +塗 +塘 +塙 +塜 +塞 +塡 +塢 +塤 +塨 +塩 +填 +塬 +塭 +塰 +塱 +塲 +塵 +塹 +塽 +塾 +墀 +境 +墅 +墉 +墊 +墎 +墓 +増 +墘 +墜 +增 +墟 +墡 +墣 +墨 +墩 +墫 +墬 +墮 +墱 +墳 +墺 +墼 +墾 +壁 +壄 +壆 +壇 +壋 +壌 +壎 +壐 +壑 +壓 +壔 +壕 +壘 +壙 +壞 +壟 +壠 +壢 +壤 +壩 +士 +壬 +壯 +壱 +壴 +壹 +壺 +壽 +夀 +夆 +変 +夊 +夋 +夌 +夏 +夔 +夕 +外 +夙 +多 +夜 +夠 +夢 +夤 +夥 +大 +天 +太 +夫 +夬 +夭 +央 +夯 +失 +夷 +夾 +奀 +奄 +奇 +奈 +奉 +奎 +奏 +奐 +契 +奓 +奔 +奕 +套 +奘 +奚 +奠 +奢 +奣 +奧 +奩 +奪 +奫 +奭 +奮 +女 +奴 +奶 +她 +好 +妀 +妁 +如 +妃 +妄 +妊 +妍 +妏 +妑 +妒 +妓 +妖 +妙 +妝 +妞 +妠 +妤 +妥 +妧 +妨 +妭 +妮 +妯 +妲 +妳 +妸 +妹 +妺 +妻 +妾 +姀 +姁 +姃 +姆 +姈 +姉 +姊 +始 +姌 +姍 +姐 +姑 +姒 +姓 +委 +姚 +姜 +姝 +姣 +姥 +姦 +姨 +姪 +姫 +姬 +姮 +姵 +姶 +姸 +姻 +姿 +威 +娃 +娉 +娋 +娌 +娍 +娎 +娑 +娖 +娘 +娛 +娜 +娟 +娠 +娣 +娥 +娩 +娫 +娳 +娶 +娸 +娼 +娽 +婀 +婁 +婆 +婉 +婊 +婑 +婕 +婚 +婢 +婦 +婧 +婪 +婭 +婯 +婷 +婺 +婻 +婼 +婿 +媃 +媄 +媊 +媐 +媒 +媓 +媖 +媗 +媚 +媛 +媜 +媞 +媧 +媭 +媯 +媲 +媳 +媺 +媼 +媽 +媾 +媿 +嫁 +嫂 +嫄 +嫈 +嫉 +嫌 +嫖 +嫘 +嫚 +嫡 +嫣 +嫦 +嫩 +嫪 +嫲 +嫳 +嫵 +嫺 +嫻 +嬅 +嬈 +嬉 +嬋 +嬌 +嬗 +嬛 +嬝 +嬡 +嬤 +嬨 +嬪 +嬬 +嬭 +嬰 +嬴 +嬸 +嬾 +嬿 +孀 +孃 +孆 +孋 +孌 +子 +孑 +孔 +孕 +孖 +字 +存 +孚 +孛 +孜 +孝 +孟 +孢 +季 +孤 +孩 +孫 +孬 +孮 +孰 +孳 +孵 +學 +孺 +孻 +孽 +孿 +宀 +它 +宅 +宇 +守 +安 +宋 +完 +宍 +宏 +宓 +宕 +宗 +官 +宙 +定 +宛 +宜 +実 +客 +宣 +室 +宥 +宦 +宧 +宮 +宰 +害 +宴 +宵 +家 +宸 +容 +宿 +寀 +寁 +寂 +寄 +寅 +密 +寇 +寈 +寊 +富 +寐 +寒 +寓 +寔 +寕 +寖 +寗 +寘 +寛 +寜 +寞 +察 +寡 +寢 +寤 +寥 +實 +寧 +寨 +審 +寫 +寬 +寮 +寯 +寰 +寳 +寵 +寶 +寸 +寺 +対 +封 +専 +尃 +射 +將 +專 +尉 +尊 +尋 +對 +導 +小 +尐 +少 +尓 +尕 +尖 +尗 +尙 +尚 +尢 +尤 +尨 +尪 +尬 +就 +尷 +尹 +尺 +尻 +尼 +尾 +尿 +局 +屁 +屄 +居 +屆 +屇 +屈 +屋 +屌 +屍 +屎 +屏 +屐 +屑 +屓 +展 +屚 +屜 +屠 +屢 +層 +履 +屬 +屭 +屯 +山 +屹 +屺 +屻 +岀 +岈 +岌 +岐 +岑 +岔 +岡 +岢 +岣 +岧 +岩 +岪 +岫 +岬 +岰 +岱 +岳 +岵 +岷 +岸 +岻 +峁 +峅 +峇 +峋 +峍 +峒 +峘 +峙 +峚 +峠 +峨 +峩 +峪 +峭 +峯 +峰 +峴 +島 +峻 +峼 +峽 +崁 +崆 +崇 +崈 +崋 +崍 +崎 +崐 +崑 +崒 +崔 +崖 +崗 +崘 +崙 +崚 +崛 +崞 +崟 +崠 +崢 +崤 +崧 +崩 +崬 +崮 +崱 +崴 +崵 +崶 +崽 +嵇 +嵊 +嵋 +嵌 +嵎 +嵐 +嵒 +嵕 +嵖 +嵗 +嵙 +嵛 +嵜 +嵨 +嵩 +嵬 +嵮 +嵯 +嵰 +嵴 +嵻 +嵿 +嶁 +嶂 +嶃 +嶄 +嶇 +嶋 +嶌 +嶍 +嶒 +嶔 +嶗 +嶝 +嶠 +嶢 +嶦 +嶧 +嶪 +嶬 +嶰 +嶲 +嶴 +嶷 +嶸 +嶺 +嶼 +嶽 +巂 +巄 +巆 +巋 +巌 +巍 +巎 +巑 +巒 +巔 +巖 +巘 +巛 +川 +州 +巡 +巢 +工 +左 +巧 +巨 +巫 +差 +巰 +己 +已 +巳 +巴 +巶 +巷 +巻 +巽 +巾 +巿 +市 +布 +帆 +希 +帑 +帔 +帕 +帖 +帘 +帙 +帚 +帛 +帝 +帡 +帢 +帥 +師 +席 +帯 +帰 +帳 +帶 +帷 +常 +帽 +幀 +幃 +幄 +幅 +幌 +幔 +幕 +幗 +幚 +幛 +幟 +幡 +幢 +幣 +幪 +幫 +干 +平 +年 +幵 +幷 +幸 +幹 +幺 +幻 +幼 +幽 +幾 +庀 +庁 +広 +庇 +床 +序 +底 +庖 +店 +庚 +府 +庠 +庢 +庥 +度 +座 +庫 +庭 +庲 +庵 +庶 +康 +庸 +庹 +庼 +庾 +廁 +廂 +廄 +廆 +廈 +廉 +廊 +廋 +廌 +廍 +廑 +廓 +廔 +廕 +廖 +廙 +廚 +廝 +廞 +廟 +廠 +廡 +廢 +廣 +廧 +廨 +廩 +廬 +廰 +廱 +廳 +延 +廷 +廸 +建 +廻 +廼 +廿 +弁 +弄 +弅 +弇 +弈 +弉 +弊 +弋 +弍 +式 +弐 +弒 +弓 +弔 +引 +弖 +弗 +弘 +弛 +弟 +弢 +弦 +弧 +弨 +弩 +弭 +弱 +張 +強 +弸 +弼 +弾 +彀 +彄 +彅 +彆 +彈 +彊 +彌 +彎 +彐 +彔 +彖 +彗 +彘 +彙 +彜 +彞 +彠 +彡 +形 +彣 +彤 +彥 +彧 +彩 +彪 +彫 +彬 +彭 +彰 +影 +彳 +彷 +役 +彼 +彿 +往 +征 +徂 +待 +徇 +很 +徉 +徊 +律 +後 +徐 +徑 +徒 +得 +徘 +徙 +徜 +從 +徠 +御 +徧 +徨 +復 +循 +徫 +徬 +徭 +微 +徳 +徴 +徵 +德 +徸 +徹 +徽 +心 +忄 +必 +忉 +忌 +忍 +忐 +忑 +忒 +志 +忘 +忙 +応 +忝 +忞 +忠 +快 +忬 +忯 +忱 +忳 +念 +忻 +忽 +忿 +怍 +怎 +怒 +怕 +怖 +怙 +怛 +思 +怠 +怡 +急 +怦 +性 +怨 +怪 +怯 +怵 +恁 +恂 +恃 +恆 +恊 +恍 +恐 +恕 +恙 +恢 +恣 +恤 +恥 +恨 +恩 +恪 +恬 +恭 +息 +恰 +恵 +恿 +悄 +悅 +悆 +悉 +悌 +悍 +悔 +悖 +悚 +悛 +悝 +悞 +悟 +悠 +患 +悧 +您 +悪 +悰 +悲 +悳 +悵 +悶 +悸 +悼 +情 +惆 +惇 +惑 +惔 +惕 +惘 +惚 +惜 +惟 +惠 +惡 +惣 +惦 +惰 +惱 +惲 +想 +惶 +惹 +惺 +愁 +愃 +愆 +愈 +愉 +愍 +意 +愐 +愒 +愔 +愕 +愚 +愛 +愜 +感 +愣 +愧 +愨 +愫 +愭 +愴 
+愷 +愼 +愾 +愿 +慄 +慈 +態 +慌 +慎 +慕 +慘 +慚 +慜 +慟 +慢 +慣 +慥 +慧 +慨 +慮 +慰 +慳 +慵 +慶 +慷 +慾 +憂 +憊 +憋 +憍 +憎 +憐 +憑 +憓 +憕 +憙 +憚 +憤 +憧 +憨 +憩 +憫 +憬 +憲 +憶 +憺 +憻 +憾 +懂 +懃 +懇 +懈 +應 +懋 +懌 +懍 +懐 +懣 +懦 +懮 +懲 +懵 +懶 +懷 +懸 +懺 +懼 +懽 +懾 +懿 +戀 +戇 +戈 +戊 +戌 +戍 +戎 +成 +我 +戒 +戔 +戕 +或 +戙 +戚 +戛 +戟 +戡 +戢 +戥 +戦 +戩 +截 +戮 +戰 +戱 +戲 +戳 +戴 +戶 +戸 +戻 +戽 +戾 +房 +所 +扁 +扆 +扇 +扈 +扉 +手 +扌 +才 +扎 +扒 +打 +扔 +托 +扙 +扛 +扞 +扣 +扥 +扦 +扭 +扮 +扯 +扳 +扶 +批 +扼 +找 +承 +技 +抃 +抄 +抇 +抉 +把 +抑 +抒 +抓 +投 +抖 +抗 +折 +抦 +披 +抬 +抱 +抵 +抹 +抻 +押 +抽 +抿 +拂 +拆 +拇 +拈 +拉 +拋 +拌 +拍 +拎 +拏 +拐 +拒 +拓 +拔 +拖 +拗 +拘 +拙 +拚 +招 +拜 +拝 +拡 +括 +拭 +拮 +拯 +拱 +拳 +拴 +拷 +拺 +拼 +拽 +拾 +拿 +持 +指 +按 +挎 +挑 +挖 +挙 +挨 +挪 +挫 +振 +挲 +挵 +挹 +挺 +挻 +挾 +捂 +捆 +捉 +捌 +捍 +捎 +捏 +捐 +捒 +捕 +捜 +捦 +捧 +捨 +捩 +捫 +捭 +捱 +捲 +捶 +捷 +捺 +捻 +掀 +掂 +掃 +掄 +掇 +授 +掉 +掌 +掏 +掐 +排 +掖 +掘 +掙 +掛 +掞 +掟 +掠 +採 +探 +掣 +接 +控 +推 +掩 +措 +掬 +掰 +掾 +揀 +揄 +揆 +揉 +揍 +描 +提 +插 +揔 +揖 +揚 +換 +握 +揪 +揭 +揮 +援 +揸 +揺 +損 +搏 +搐 +搓 +搔 +搖 +搗 +搜 +搞 +搠 +搢 +搪 +搬 +搭 +搳 +搴 +搵 +搶 +搽 +搾 +摂 +摒 +摔 +摘 +摜 +摞 +摟 +摠 +摧 +摩 +摭 +摯 +摳 +摴 +摵 +摶 +摸 +摹 +摺 +摻 +摽 +撃 +撇 +撈 +撐 +撒 +撓 +撕 +撖 +撙 +撚 +撞 +撣 +撤 +撥 +撩 +撫 +撬 +播 +撮 +撰 +撲 +撳 +撻 +撼 +撾 +撿 +擀 +擁 +擂 +擅 +擇 +擊 +擋 +操 +擎 +擒 +擔 +擘 +據 +擠 +擢 +擥 +擦 +擬 +擯 +擰 +擱 +擲 +擴 +擷 +擺 +擼 +擾 +攀 +攏 +攔 +攖 +攘 +攜 +攝 +攞 +攢 +攣 +攤 +攪 +攫 +攬 +支 +攴 +攵 +收 +攷 +攸 +改 +攻 +攽 +放 +政 +故 +效 +敍 +敎 +敏 +救 +敔 +敕 +敖 +敗 +敘 +教 +敝 +敞 +敟 +敢 +散 +敦 +敫 +敬 +敭 +敲 +整 +敵 +敷 +數 +敻 +敾 +斂 +斃 +文 +斌 +斎 +斐 +斑 +斕 +斖 +斗 +料 +斛 +斜 +斝 +斟 +斡 +斤 +斥 +斧 +斬 +斯 +新 +斷 +方 +於 +施 +斿 +旁 +旂 +旃 +旄 +旅 +旉 +旋 +旌 +旎 +族 +旖 +旗 +旙 +旛 +旡 +既 +日 +旦 +旨 +早 +旬 +旭 +旱 +旲 +旳 +旺 +旻 +旼 +旽 +旾 +旿 +昀 +昂 +昃 +昆 +昇 +昉 +昊 +昌 +昍 +明 +昏 +昐 +易 +昔 +昕 +昚 +昛 +昜 +昝 +昞 +星 +映 +昡 +昣 +昤 +春 +昧 +昨 +昪 +昫 +昭 +是 +昰 +昱 +昴 +昵 +昶 +昺 +晁 +時 +晃 +晈 +晉 +晊 +晏 +晗 +晙 +晚 +晛 +晝 +晞 +晟 +晤 +晦 +晧 +晨 +晩 +晪 +晫 +晭 +普 +景 +晰 +晳 +晴 +晶 +晷 +晸 +智 +晾 +暃 +暄 +暅 +暇 +暈 +暉 +暊 +暌 +暎 +暏 +暐 +暑 +暕 +暖 +暗 +暘 +暝 +暟 +暠 +暢 +暦 +暨 +暫 +暮 +暱 +暲 +暴 +暸 +暹 +暻 +暾 +曄 +曅 +曆 +曇 +曉 +曌 +曔 +曖 +曙 +曜 +曝 +曠 +曦 +曧 +曨 +曩 +曬 +曮 +曰 +曲 +曳 +更 +曶 +曷 +書 +曹 +曺 +曼 +曽 +曾 +替 +最 +會 +月 +有 +朊 +朋 +服 +朏 +朐 +朓 +朔 +朕 +朖 +朗 +望 +朝 +期 +朦 +朧 +木 +未 +末 +本 +札 +朱 +朴 +朵 +朶 +朽 +朿 +杁 +杉 +杋 +杌 +李 +杏 +材 +村 +杓 +杖 +杙 +杜 +杞 +束 +杠 +杣 +杤 +杧 +杬 +杭 +杯 +東 +杲 +杳 +杴 +杵 +杷 +杻 +杼 +松 +板 +极 +枇 +枉 +枋 +枏 +析 +枕 +枖 +林 +枚 +枛 +果 +枝 +枠 +枡 +枯 +枰 +枱 +枲 +枳 +架 +枷 +枸 +枹 +枼 +柁 +柃 +柄 +柉 +柊 +柎 +柏 +某 +柑 +柒 +染 +柔 +柘 +柚 +柜 +柝 +柞 +柟 +查 +柩 +柬 +柯 +柰 +柱 +柳 +柴 +柵 +柶 +柷 +査 +柾 +柿 +栃 +栄 +栐 +栒 +栓 +栜 +栝 +栞 +校 +栢 +栨 +栩 +株 +栲 +栴 +核 +根 +栻 +格 +栽 +桀 +桁 +桂 +桃 +桄 +桅 +框 +案 +桉 +桌 +桎 +桐 +桑 +桓 +桔 +桕 +桖 +桙 +桜 +桝 +桫 +桱 +桲 +桴 +桶 +桷 +桼 +桿 +梀 +梁 +梂 +梃 +梅 +梆 +梉 +梏 +梓 +梔 +梗 +梘 +條 +梟 +梠 +梢 +梣 +梧 +梨 +梫 +梭 +梯 +械 +梱 +梳 +梵 +梶 +梽 +棄 +棆 +棉 +棋 +棍 +棐 +棒 +棓 +棕 +棖 +棗 +棘 +棚 +棛 +棟 +棠 +棡 +棣 +棧 +棨 +棩 +棪 +棫 +森 +棱 +棲 +棵 +棶 +棹 +棺 +棻 +棼 +棽 +椅 +椆 +椇 +椋 +植 +椎 +椏 +椒 +椙 +椥 +椪 +椰 +椲 +椴 +椵 +椹 +椽 +椿 +楂 +楊 +楓 +楔 +楗 +楙 +楚 +楝 +楞 +楠 +楡 +楢 +楣 +楤 +楦 +楧 +楨 +楫 +業 +楮 +楯 +楳 +極 +楷 +楸 +楹 +楽 +楿 +概 +榆 +榊 +榍 +榎 +榑 +榔 +榕 +榖 +榗 +榘 +榛 +榜 +榞 +榢 +榣 +榤 +榦 +榧 +榨 +榫 +榭 +榮 +榲 +榴 +榷 +榻 +榿 +槀 +槁 +槃 +槊 +構 +槌 +槍 +槎 +槐 +槓 +槔 +槗 +様 +槙 +槤 +槩 +槭 +槰 +槱 +槲 +槳 +槺 +槻 +槼 +槽 +槿 +樀 +樁 +樂 +樅 +樆 +樊 +樋 +樑 +樓 +樗 +樘 +標 +樞 +樟 +模 +樣 +樨 +権 +樫 +樵 +樸 +樹 +樺 +樻 +樽 +樾 +橄 +橇 +橈 +橋 +橐 +橒 +橓 +橘 +橙 +橚 +機 +橡 +橢 +橪 +橫 +橿 +檀 +檄 +檇 +檉 +檊 +檎 +檐 +檔 +檗 +檜 +檞 +檠 +檡 +檢 +檣 +檦 +檨 +檫 +檬 +檯 +檳 +檵 +檸 +檻 +檽 +櫂 +櫃 +櫆 +櫈 +櫓 +櫚 +櫛 +櫞 +櫟 +櫥 +櫨 +櫪 +櫱 +櫸 +櫻 +櫾 +櫿 +欄 +欉 +權 +欏 +欒 +欖 +欞 +欠 +次 +欣 +欥 +欲 +欸 +欹 +欺 +欽 +款 +歆 +歇 +歉 +歊 +歌 +歎 +歐 +歓 +歙 +歛 +歡 +止 +正 +此 +步 +武 +歧 +歩 +歪 +歲 +歳 +歴 +歷 +歸 +歹 +死 +歿 +殂 +殃 +殄 +殆 +殉 +殊 +殑 +殖 +殘 +殛 +殞 +殟 +殤 +殭 +殮 +殯 +殲 +殳 +段 +殷 +殺 +殻 +殼 +殿 +毀 +毅 +毆 +毉 +毋 +毌 +母 +毎 +每 +毐 +毒 +毓 +比 +毖 +毗 +毘 +毛 +毫 +毬 +毯 +毴 +毸 +毽 +毿 +氂 +氈 +氍 +氏 +氐 +民 +氓 +氖 +気 +氘 +氙 +氚 +氛 +氟 +氣 +氦 +氧 +氨 +氪 +氫 +氬 +氮 +氯 +氰 +水 +氵 +氷 +永 +氹 +氻 +氽 +氾 +汀 +汁 +求 +汊 +汎 +汐 +汕 +汗 +汛 +汜 +汝 +汞 +江 +池 +污 +汧 +汨 +汩 +汪 +汭 +汰 +汲 +汴 +汶 +決 +汽 +汾 +沁 +沂 +沃 +沄 +沅 
+沆 +沇 +沈 +沉 +沌 +沍 +沏 +沐 +沒 +沓 +沔 +沖 +沘 +沙 +沚 +沛 +沜 +沢 +沨 +沫 +沭 +沮 +沯 +沱 +河 +沸 +油 +沺 +治 +沼 +沽 +沾 +沿 +況 +泂 +泄 +泆 +泇 +泉 +泊 +泌 +泐 +泓 +泔 +法 +泖 +泗 +泚 +泛 +泠 +泡 +波 +泣 +泥 +泩 +泫 +泮 +泯 +泰 +泱 +泳 +泵 +洄 +洋 +洌 +洎 +洗 +洙 +洛 +洞 +洢 +洣 +洤 +津 +洨 +洩 +洪 +洮 +洱 +洲 +洳 +洵 +洸 +洹 +洺 +活 +洽 +派 +流 +浄 +浙 +浚 +浛 +浜 +浞 +浟 +浠 +浡 +浣 +浤 +浥 +浦 +浩 +浪 +浮 +浯 +浴 +浵 +海 +浸 +浹 +涅 +涇 +消 +涉 +涌 +涎 +涑 +涓 +涔 +涕 +涙 +涪 +涫 +涮 +涯 +液 +涵 +涸 +涼 +涿 +淄 +淅 +淆 +淇 +淋 +淌 +淍 +淎 +淏 +淑 +淓 +淖 +淘 +淙 +淚 +淛 +淝 +淞 +淠 +淡 +淤 +淥 +淦 +淨 +淩 +淪 +淫 +淬 +淮 +淯 +淰 +深 +淳 +淵 +淶 +混 +淸 +淹 +淺 +添 +淼 +淽 +渃 +清 +済 +渉 +渋 +渕 +渙 +渚 +減 +渝 +渟 +渠 +渡 +渣 +渤 +渥 +渦 +渫 +測 +渭 +港 +渲 +渴 +游 +渺 +渼 +渽 +渾 +湃 +湄 +湉 +湊 +湍 +湓 +湔 +湖 +湘 +湛 +湜 +湞 +湟 +湣 +湥 +湧 +湫 +湮 +湯 +湳 +湴 +湼 +満 +溁 +溇 +溈 +溉 +溋 +溎 +溏 +源 +準 +溙 +溜 +溝 +溟 +溢 +溥 +溦 +溧 +溪 +溫 +溯 +溱 +溲 +溴 +溵 +溶 +溺 +溼 +滀 +滁 +滂 +滄 +滅 +滇 +滈 +滉 +滋 +滌 +滎 +滏 +滑 +滓 +滔 +滕 +滘 +滙 +滝 +滬 +滯 +滲 +滴 +滷 +滸 +滹 +滻 +滽 +滾 +滿 +漁 +漂 +漆 +漇 +漈 +漎 +漏 +漓 +演 +漕 +漚 +漠 +漢 +漣 +漩 +漪 +漫 +漬 +漯 +漱 +漲 +漳 +漴 +漵 +漷 +漸 +漼 +漾 +漿 +潁 +潑 +潔 +潘 +潛 +潞 +潟 +潢 +潤 +潭 +潮 +潯 +潰 +潲 +潺 +潼 +潽 +潾 +潿 +澀 +澁 +澂 +澄 +澆 +澇 +澈 +澉 +澋 +澌 +澍 +澎 +澔 +澗 +澠 +澡 +澣 +澤 +澥 +澧 +澪 +澮 +澯 +澱 +澳 +澶 +澹 +澻 +激 +濁 +濂 +濃 +濉 +濊 +濋 +濕 +濘 +濙 +濛 +濞 +濟 +濠 +濡 +濤 +濫 +濬 +濮 +濯 +濰 +濱 +濲 +濶 +濺 +濼 +濾 +瀁 +瀅 +瀆 +瀉 +瀍 +瀏 +瀑 +瀔 +瀕 +瀘 +瀚 +瀛 +瀝 +瀞 +瀟 +瀠 +瀣 +瀦 +瀧 +瀨 +瀬 +瀰 +瀲 +瀴 +瀶 +瀹 +瀾 +灃 +灊 +灌 +灑 +灘 +灝 +灞 +灡 +灣 +灤 +灧 +火 +灰 +灴 +灸 +灼 +災 +炁 +炅 +炆 +炊 +炎 +炒 +炔 +炕 +炘 +炙 +炟 +炣 +炤 +炫 +炬 +炭 +炮 +炯 +炱 +炲 +炳 +炷 +炸 +為 +炻 +烈 +烉 +烊 +烋 +烏 +烒 +烔 +烘 +烙 +烜 +烝 +烤 +烯 +烱 +烴 +烷 +烹 +烺 +烽 +焃 +焄 +焉 +焊 +焌 +焓 +焗 +焙 +焚 +焜 +焞 +無 +焦 +焯 +焰 +焱 +焴 +然 +焻 +焼 +焿 +煇 +煉 +煊 +煌 +煎 +煐 +煒 +煔 +煕 +煖 +煙 +煚 +煜 +煞 +煠 +煤 +煥 +煦 +照 +煨 +煩 +煬 +煮 +煲 +煳 +煵 +煶 +煸 +煽 +熄 +熅 +熇 +熈 +熊 +熏 +熒 +熔 +熖 +熗 +熘 +熙 +熜 +熟 +熠 +熤 +熥 +熨 +熬 +熯 +熱 +熲 +熳 +熵 +熹 +熺 +熼 +熾 +熿 +燁 +燃 +燄 +燈 +燉 +燊 +燎 +燏 +燐 +燒 +燔 +燕 +燘 +燙 +燚 +燜 +燝 +營 +燥 +燦 +燧 +燫 +燬 +燭 +燮 +燴 +燹 +燻 +燼 +燾 +燿 +爀 +爆 +爌 +爍 +爐 +爔 +爚 +爛 +爝 +爨 +爪 +爬 +爭 +爯 +爰 +爲 +爵 +父 +爸 +爹 +爺 +爻 +爽 +爾 +爿 +牁 +牂 +牆 +片 +版 +牌 +牒 +牕 +牖 +牘 +牙 +牛 +牝 +牟 +牠 +牡 +牢 +牧 +物 +牯 +牲 +特 +牻 +牼 +牽 +犀 +犁 +犂 +犇 +犍 +犎 +犖 +犛 +犢 +犧 +犨 +犬 +犯 +犰 +犴 +犽 +狀 +狂 +狄 +狍 +狎 +狐 +狒 +狓 +狗 +狙 +狛 +狟 +狠 +狡 +狦 +狨 +狩 +狳 +狶 +狷 +狸 +狹 +狻 +狼 +猁 +猄 +猇 +猊 +猗 +猙 +猛 +猜 +猝 +猞 +猢 +猥 +猨 +猩 +猳 +猴 +猶 +猷 +猺 +猻 +猾 +猿 +獁 +獃 +獄 +獅 +獇 +獎 +獏 +獐 +獒 +獠 +獢 +獣 +獨 +獬 +獮 +獯 +獰 +獲 +獴 +獵 +獷 +獸 +獺 +獻 +獼 +獾 +玀 +玄 +玆 +率 +玉 +王 +玎 +玏 +玓 +玕 +玖 +玗 +玘 +玙 +玟 +玠 +玡 +玢 +玥 +玧 +玨 +玩 +玫 +玭 +玲 +玳 +玶 +玷 +玹 +玻 +玾 +珀 +珂 +珅 +珈 +珉 +珊 +珌 +珍 +珎 +珏 +珖 +珙 +珝 +珞 +珠 +珡 +珣 +珤 +珥 +珦 +珧 +珩 +珪 +班 +珮 +珵 +珹 +珺 +珽 +現 +琁 +球 +琄 +琅 +理 +琇 +琉 +琊 +琍 +琎 +琚 +琛 +琡 +琢 +琤 +琥 +琦 +琨 +琪 +琬 +琮 +琯 +琰 +琱 +琳 +琴 +琵 +琶 +琹 +琺 +琿 +瑀 +瑁 +瑂 +瑄 +瑅 +瑆 +瑈 +瑊 +瑋 +瑑 +瑒 +瑕 +瑗 +瑙 +瑚 +瑛 +瑜 +瑝 +瑞 +瑟 +瑠 +瑢 +瑣 +瑤 +瑥 +瑧 +瑨 +瑩 +瑪 +瑭 +瑯 +瑰 +瑱 +瑳 +瑴 +瑺 +瑾 +璀 +璁 +璃 +璄 +璆 +璇 +璈 +璉 +璋 +璌 +璐 +璕 +璘 +璙 +璚 +璜 +璞 +璟 +璠 +璡 +璣 +璥 +璦 +璧 +璨 +璩 +璪 +璫 +璬 +璮 +環 +璱 +璵 +璸 +璹 +璽 +璿 +瓈 +瓊 +瓌 +瓏 +瓑 +瓔 +瓖 +瓘 +瓚 +瓛 +瓜 +瓞 +瓠 +瓢 +瓣 +瓤 +瓦 +瓮 +瓴 +瓶 +瓷 +瓿 +甂 +甄 +甌 +甍 +甑 +甕 +甘 +甙 +甚 +甜 +生 +甡 +產 +産 +甥 +甦 +用 +甩 +甪 +甫 +甬 +甯 +田 +由 +甲 +申 +男 +甸 +甹 +町 +甾 +畀 +畇 +畈 +畊 +畋 +界 +畎 +畏 +畐 +畑 +畔 +留 +畜 +畝 +畠 +畢 +略 +畦 +畧 +番 +畫 +畬 +畯 +異 +畲 +畳 +畵 +當 +畷 +畸 +畹 +畿 +疃 +疆 +疇 +疊 +疋 +疌 +疍 +疏 +疑 +疒 +疕 +疙 +疚 +疝 +疣 +疤 +疥 +疫 +疲 +疳 +疵 +疸 +疹 +疼 +疽 +疾 +痂 +病 +症 +痊 +痍 +痔 +痕 +痘 +痙 +痛 +痞 +痟 +痠 +痢 +痣 +痤 +痧 +痩 +痰 +痱 +痲 +痴 +痹 +痺 +痿 +瘀 +瘁 +瘊 +瘋 +瘍 +瘓 +瘙 +瘜 +瘞 +瘟 +瘠 +瘡 +瘢 +瘤 +瘦 +瘧 +瘩 +瘰 +瘴 +瘺 +癀 +療 +癆 +癇 +癌 +癒 +癖 +癘 +癜 +癟 +癡 +癢 +癤 +癥 +癩 +癬 +癭 +癮 +癯 +癰 +癱 +癲 +癸 +発 +登 +發 +白 +百 +皂 +的 +皆 +皇 +皈 +皋 +皎 +皐 +皓 +皖 +皙 +皚 +皛 +皝 +皞 +皮 +皰 +皴 +皷 +皸 +皺 +皿 +盂 +盃 +盅 +盆 +盈 +益 +盋 +盌 +盎 +盒 +盔 +盛 +盜 +盞 +盟 +盡 +監 +盤 +盥 +盦 +盧 +盨 +盩 +盪 +盫 +目 +盯 +盱 +盲 +直 +盷 +相 +盹 +盺 +盼 +盾 +眀 +省 +眉 +看 +県 +眙 +眛 +眜 +眞 +真 +眠 +眥 +眨 +眩 +眭 +眯 +眵 +眶 +眷 +眸 +眺 +眼 +眾 +着 +睇 +睛 +睜 +睞 +睡 +睢 +督 +睥 +睦 +睨 +睪 +睫 +睭 +睹 +睺 +睽 +睾 +睿 +瞄 +瞅 +瞋 +瞌 +瞎 +瞑 +瞓 +瞞 +瞢 +瞥 +瞧 +瞪 +瞫 +瞬 +瞭 
+瞰 +瞳 +瞻 +瞼 +瞽 +瞿 +矇 +矍 +矗 +矚 +矛 +矜 +矞 +矢 +矣 +知 +矧 +矩 +短 +矮 +矯 +石 +矸 +矽 +砂 +砋 +砌 +砍 +砒 +研 +砝 +砢 +砥 +砦 +砧 +砩 +砫 +砭 +砮 +砯 +砰 +砲 +砳 +破 +砵 +砷 +砸 +砼 +硂 +硃 +硅 +硇 +硏 +硐 +硒 +硓 +硚 +硜 +硝 +硤 +硨 +硫 +硬 +硭 +硯 +硼 +碁 +碇 +碉 +碌 +碎 +碑 +碓 +碕 +碗 +碘 +碚 +碟 +碡 +碣 +碧 +碩 +碪 +碭 +碰 +碲 +碳 +碴 +碶 +碸 +確 +碻 +碼 +碽 +碾 +磁 +磅 +磊 +磋 +磐 +磔 +磕 +磘 +磙 +磚 +磜 +磡 +磨 +磪 +磬 +磯 +磱 +磲 +磵 +磷 +磺 +磻 +磾 +礁 +礄 +礎 +礐 +礑 +礒 +礙 +礠 +礦 +礪 +礫 +礬 +礮 +礱 +礴 +示 +礻 +礽 +社 +祀 +祁 +祂 +祆 +祇 +祈 +祉 +祋 +祏 +祐 +祓 +祕 +祖 +祗 +祙 +祚 +祛 +祜 +祝 +神 +祟 +祠 +祥 +祧 +票 +祭 +祹 +祺 +祼 +祿 +禁 +禃 +禇 +禍 +禎 +福 +禑 +禓 +禔 +禕 +禘 +禛 +禟 +禠 +禤 +禦 +禧 +禨 +禩 +禪 +禮 +禰 +禱 +禵 +禹 +禺 +禼 +禽 +禾 +禿 +秀 +私 +秈 +秉 +秋 +科 +秒 +秕 +秘 +租 +秠 +秣 +秤 +秦 +秧 +秩 +秭 +秳 +秸 +移 +稀 +稅 +稈 +稉 +程 +稍 +稑 +稔 +稗 +稘 +稙 +稚 +稜 +稞 +稟 +稠 +種 +稱 +稲 +稷 +稹 +稺 +稻 +稼 +稽 +稾 +稿 +穀 +穂 +穆 +穈 +穉 +穌 +積 +穎 +穗 +穟 +穠 +穡 +穢 +穣 +穩 +穫 +穰 +穴 +穵 +究 +穹 +空 +穿 +突 +窄 +窅 +窈 +窋 +窒 +窕 +窖 +窗 +窘 +窟 +窠 +窣 +窨 +窩 +窪 +窮 +窯 +窰 +窶 +窺 +窿 +竄 +竅 +竇 +竈 +竊 +立 +竑 +站 +竜 +竟 +章 +竣 +童 +竦 +竩 +竭 +端 +競 +竹 +竺 +竻 +竿 +笄 +笆 +笈 +笏 +笑 +笘 +笙 +笛 +笞 +笠 +笥 +符 +笨 +笩 +笪 +第 +笭 +笮 +笯 +笱 +笳 +笹 +筅 +筆 +等 +筊 +筋 +筌 +筍 +筏 +筐 +筒 +答 +策 +筘 +筠 +筥 +筦 +筧 +筬 +筭 +筱 +筲 +筳 +筵 +筶 +筷 +筻 +箆 +箇 +箋 +箍 +箏 +箐 +箑 +箒 +箔 +箕 +算 +箜 +管 +箬 +箭 +箱 +箴 +箸 +節 +篁 +範 +篆 +篇 +築 +篊 +篋 +篌 +篔 +篙 +篝 +篠 +篡 +篤 +篥 +篦 +篩 +篪 +篭 +篯 +篳 +篷 +簀 +簃 +簇 +簉 +簋 +簍 +簑 +簕 +簗 +簞 +簠 +簡 +簧 +簪 +簫 +簷 +簸 +簹 +簺 +簽 +簾 +簿 +籀 +籃 +籌 +籍 +籐 +籙 +籛 +籜 +籝 +籟 +籠 +籣 +籤 +籥 +籪 +籬 +籮 +籲 +米 +籽 +籾 +粄 +粉 +粍 +粑 +粒 +粕 +粗 +粘 +粟 +粢 +粥 +粦 +粧 +粩 +粱 +粲 +粳 +粵 +粹 +粼 +粽 +精 +粿 +糀 +糅 +糊 +糌 +糍 +糎 +糕 +糖 +糙 +糜 +糝 +糞 +糟 +糠 +糢 +糧 +糬 +糯 +糰 +糴 +糶 +糸 +糹 +糺 +系 +糾 +紀 +紂 +約 +紅 +紆 +紇 +紈 +紉 +紊 +紋 +納 +紐 +紑 +紓 +純 +紕 +紗 +紘 +紙 +級 +紛 +紜 +紝 +紞 +素 +紡 +索 +紫 +紮 +累 +細 +紱 +紲 +紳 +紵 +紹 +紺 +紿 +終 +絃 +組 +絆 +経 +絎 +結 +絕 +絛 +絜 +絞 +絡 +絢 +給 +絨 +絪 +絮 +統 +絲 +絳 +絵 +絶 +絹 +絺 +綁 +綃 +綈 +綉 +綎 +綏 +經 +綖 +継 +続 +綜 +綝 +綞 +綠 +綢 +綣 +綦 +綧 +綫 +綬 +維 +綮 +綰 +綱 +網 +綳 +綴 +綸 +綺 +綻 +綽 +綾 +綿 +緁 +緃 +緄 +緈 +緊 +緋 +総 +緑 +緒 +緖 +緘 +線 +緜 +緝 +緞 +締 +緡 +緣 +緤 +編 +緩 +緬 +緯 +緱 +緲 +練 +緹 +緻 +縂 +縄 +縈 +縉 +縊 +縕 +縛 +縝 +縞 +縠 +縡 +縣 +縤 +縫 +縮 +縯 +縱 +縴 +縵 +縷 +縹 +縻 +總 +績 +繁 +繃 +繆 +繇 +繒 +織 +繕 +繖 +繙 +繚 +繞 +繡 +繩 +繪 +繫 +繭 +繰 +繳 +繹 +繻 +繼 +繽 +繾 +纁 +纂 +纈 +續 +纍 +纏 +纓 +纔 +纕 +纖 +纘 +纛 +纜 +缐 +缶 +缸 +缺 +缽 +罃 +罄 +罅 +罈 +罉 +罌 +罍 +罐 +罔 +罕 +罘 +罟 +罡 +罨 +罩 +罪 +置 +罰 +罱 +署 +罳 +罵 +罶 +罷 +罹 +罽 +羂 +羅 +羆 +羈 +羊 +羋 +羌 +美 +羔 +羕 +羗 +羙 +羚 +羞 +羡 +羣 +群 +羥 +羧 +羨 +義 +羯 +羰 +羱 +羲 +羸 +羹 +羽 +羿 +翀 +翁 +翂 +翃 +翅 +翊 +翌 +翎 +翏 +習 +翔 +翕 +翙 +翜 +翟 +翠 +翡 +翥 +翦 +翩 +翬 +翮 +翰 +翱 +翳 +翹 +翻 +翼 +耀 +老 +考 +耄 +者 +耆 +而 +耍 +耎 +耐 +耑 +耒 +耔 +耕 +耗 +耘 +耙 +耜 +耦 +耨 +耬 +耳 +耵 +耶 +耷 +耽 +耿 +聃 +聆 +聊 +聒 +聖 +聘 +聚 +聞 +聟 +聨 +聯 +聰 +聱 +聲 +聳 +聴 +聶 +職 +聽 +聾 +聿 +肄 +肅 +肆 +肇 +肉 +肋 +肌 +肏 +肖 +肘 +肚 +肛 +肜 +肝 +肟 +股 +肢 +肥 +肩 +肪 +肫 +肯 +肱 +育 +肸 +肹 +肺 +肼 +肽 +胂 +胃 +胄 +胅 +胇 +胊 +背 +胍 +胎 +胖 +胗 +胙 +胚 +胛 +胝 +胞 +胡 +胤 +胥 +胬 +胭 +胰 +胱 +胳 +胴 +胸 +胺 +胼 +能 +脂 +脅 +脆 +脇 +脈 +脊 +脒 +脖 +脘 +脛 +脣 +脩 +脫 +脬 +脭 +脯 +脲 +脳 +脷 +脹 +脾 +腆 +腈 +腊 +腋 +腌 +腎 +腐 +腑 +腓 +腔 +腕 +腥 +腦 +腧 +腩 +腫 +腮 +腰 +腱 +腳 +腴 +腸 +腹 +腺 +腿 +膀 +膂 +膈 +膊 +膏 +膚 +膛 +膜 +膝 +膠 +膣 +膥 +膦 +膨 +膩 +膮 +膳 +膺 +膽 +膾 +膿 +臀 +臂 +臃 +臆 +臉 +臊 +臍 +臏 +臘 +臚 +臞 +臟 +臠 +臣 +臧 +臨 +自 +臭 +臯 +至 +致 +臺 +臻 +臼 +臾 +舂 +舅 +與 +興 +舉 +舊 +舌 +舍 +舎 +舒 +舔 +舖 +舘 +舛 +舜 +舞 +舟 +舢 +舥 +舨 +舩 +航 +舫 +般 +舲 +舵 +舶 +舷 +舸 +船 +舺 +艅 +艇 +艉 +艋 +艎 +艏 +艔 +艘 +艙 +艚 +艦 +艮 +良 +艱 +色 +艶 +艷 +艸 +艽 +艾 +艿 +芃 +芊 +芋 +芍 +芎 +芑 +芒 +芘 +芙 +芛 +芝 +芡 +芥 +芨 +芩 +芪 +芫 +芬 +芭 +芮 +芯 +花 +芳 +芴 +芷 +芸 +芹 +芻 +芽 +芾 +苄 +苅 +苑 +苒 +苓 +苔 +苕 +苗 +苛 +苜 +苝 +苞 +苟 +苡 +苣 +苤 +若 +苦 +苧 +苪 +苫 +苯 +英 +苳 +苴 +苷 +苺 +苻 +苼 +苾 +茀 +茁 +茂 +范 +茄 +茅 +茆 +茇 +茈 +茉 +茌 +茗 +茘 +茚 +茛 +茜 +茝 +茨 +茫 +茬 +茭 +茮 +茯 +茱 +茲 +茴 +茵 +茶 +茷 +茸 +茹 +茺 +茼 +荀 +荃 +荅 +荇 +草 +荊 +荎 +荏 +荒 +荔 +荖 +荘 +荳 +荷 +荸 +荻 +荼 +荽 +莆 +莉 +莊 +莎 +莒 +莓 +莕 +莖 +莘 +莙 +莛 +莜 +莞 +莠 +莢 +莧 +莨 +莩 +莪 +莫 +莽 +莿 +菀 +菁 +菅 +菇 +菈 +菉 +菊 +菌 +菍 +菏 +菑 +菓 +菔 +菖 +菘 +菜 +菝 +菟 +菠 +菡 +菥 +菩 +菪 +菫 +華 +菰 +菱 +菲 +菴 +菶 +菸 +菹 +菺 +菼 +菽 +菾 +萁 +萃 +萄 
+萇 +萊 +萌 +萍 +萎 +萐 +萘 +萜 +萠 +萡 +萣 +萩 +萬 +萭 +萱 +萵 +萸 +萹 +萼 +落 +葃 +葆 +葉 +葊 +葎 +葑 +葒 +著 +葙 +葚 +葛 +葜 +葝 +葡 +董 +葦 +葩 +葫 +葬 +葭 +葯 +葰 +葳 +葵 +葶 +葷 +葺 +蒂 +蒄 +蒍 +蒎 +蒐 +蒓 +蒔 +蒗 +蒙 +蒜 +蒞 +蒟 +蒡 +蒢 +蒤 +蒧 +蒨 +蒭 +蒯 +蒲 +蒴 +蒸 +蒹 +蒺 +蒻 +蒼 +蒽 +蒾 +蒿 +蓀 +蓁 +蓂 +蓄 +蓆 +蓉 +蓋 +蓍 +蓑 +蓓 +蓖 +蓘 +蓚 +蓧 +蓨 +蓪 +蓬 +蓭 +蓮 +蓯 +蓳 +蓼 +蓽 +蓿 +蔆 +蔎 +蔑 +蔓 +蔔 +蔕 +蔗 +蔘 +蔚 +蔝 +蔞 +蔡 +蔣 +蔥 +蔦 +蔬 +蔭 +蔴 +蔵 +蔻 +蔽 +蕁 +蕃 +蕅 +蕈 +蕉 +蕊 +蕎 +蕑 +蕒 +蕖 +蕘 +蕙 +蕚 +蕟 +蕡 +蕢 +蕤 +蕨 +蕩 +蕪 +蕭 +蕷 +蕹 +蕺 +蕻 +蕾 +薀 +薄 +薆 +薇 +薈 +薊 +薌 +薏 +薐 +薑 +薔 +薗 +薘 +薙 +薛 +薜 +薞 +薟 +薡 +薦 +薨 +薩 +薪 +薫 +薬 +薯 +薰 +薲 +薷 +薸 +薹 +薺 +薾 +薿 +藁 +藉 +藍 +藎 +藏 +藐 +藔 +藕 +藜 +藝 +藟 +藤 +藥 +藦 +藨 +藩 +藪 +藶 +藸 +藹 +藺 +藻 +藿 +蘂 +蘄 +蘅 +蘆 +蘇 +蘊 +蘋 +蘐 +蘑 +蘓 +蘗 +蘘 +蘚 +蘞 +蘢 +蘧 +蘩 +蘭 +蘵 +蘶 +蘸 +蘼 +蘿 +虉 +虎 +虐 +虓 +虔 +處 +虖 +虛 +虜 +虞 +號 +虢 +虧 +虨 +虯 +虱 +虵 +虹 +虺 +虻 +蚆 +蚊 +蚋 +蚌 +蚍 +蚓 +蚖 +蚜 +蚝 +蚡 +蚢 +蚣 +蚤 +蚧 +蚨 +蚩 +蚪 +蚯 +蚱 +蚴 +蚵 +蚶 +蚺 +蚼 +蛀 +蛄 +蛇 +蛉 +蛋 +蛍 +蛐 +蛑 +蛔 +蛙 +蛛 +蛞 +蛟 +蛤 +蛭 +蛯 +蛸 +蛹 +蛺 +蛻 +蛾 +蜀 +蜂 +蜃 +蜆 +蜇 +蜈 +蜉 +蜊 +蜍 +蜑 +蜒 +蜓 +蜘 +蜚 +蜛 +蜜 +蜞 +蜢 +蜣 +蜥 +蜨 +蜮 +蜯 +蜱 +蜴 +蜷 +蜻 +蜾 +蜿 +蝀 +蝌 +蝍 +蝎 +蝓 +蝕 +蝗 +蝘 +蝙 +蝚 +蝟 +蝠 +蝣 +蝤 +蝦 +蝨 +蝮 +蝯 +蝰 +蝲 +蝴 +蝶 +蝸 +蝽 +螂 +螃 +螄 +螅 +螈 +螋 +融 +螐 +螔 +螞 +螟 +螠 +螢 +螣 +螥 +螫 +螭 +螯 +螳 +螶 +螺 +螻 +螽 +螾 +蟀 +蟄 +蟅 +蟆 +蟊 +蟋 +蟌 +蟎 +蟑 +蟒 +蟜 +蟠 +蟥 +蟪 +蟫 +蟬 +蟯 +蟲 +蟳 +蟴 +蟶 +蟹 +蟻 +蟾 +蠂 +蠃 +蠄 +蠅 +蠆 +蠊 +蠋 +蠍 +蠐 +蠑 +蠓 +蠔 +蠕 +蠖 +蠘 +蠙 +蠟 +蠡 +蠢 +蠣 +蠱 +蠲 +蠵 +蠶 +蠷 +蠹 +蠻 +血 +衂 +衆 +行 +衍 +衎 +術 +衕 +衖 +街 +衙 +衚 +衛 +衜 +衝 +衞 +衡 +衢 +衣 +表 +衩 +衫 +衰 +衲 +衷 +衽 +衾 +衿 +袁 +袂 +袈 +袋 +袍 +袓 +袖 +袛 +袞 +袤 +袪 +被 +袱 +袴 +袾 +裁 +裂 +裊 +裎 +裒 +裔 +裕 +裖 +裘 +裙 +補 +裝 +裟 +裡 +裨 +裬 +裱 +裳 +裴 +裵 +裸 +裹 +製 +裾 +裿 +褀 +褂 +複 +褌 +褍 +褎 +褐 +褒 +褓 +褔 +褘 +褙 +褚 +褞 +褥 +褧 +褪 +褫 +褭 +褲 +褶 +褸 +褻 +襄 +襌 +襖 +襞 +襟 +襠 +襤 +襦 +襪 +襯 +襲 +襴 +襶 +襻 +襾 +西 +要 +覃 +覆 +覇 +覈 +見 +覌 +規 +覓 +視 +覚 +覡 +覦 +覧 +親 +覬 +覲 +観 +覺 +覽 +覿 +觀 +角 +觔 +觙 +觚 +觜 +解 +觭 +觱 +觴 +觶 +觸 +觿 +言 +訁 +訂 +訃 +訇 +計 +訊 +訌 +討 +訏 +訐 +訒 +訓 +訔 +訕 +訖 +託 +記 +訛 +訝 +訟 +訣 +訥 +訪 +設 +許 +訴 +訶 +診 +註 +証 +訾 +詁 +詆 +詈 +詐 +詒 +詔 +評 +詛 +詞 +詠 +詡 +詢 +詣 +詥 +試 +詧 +詩 +詫 +詭 +詮 +詰 +話 +該 +詳 +詵 +詹 +詼 +誄 +誅 +誇 +誌 +認 +誒 +誓 +誕 +誘 +語 +誠 +誡 +誣 +誤 +誥 +誦 +誨 +說 +説 +読 +誰 +課 +誴 +誹 +誼 +誾 +調 +談 +請 +諍 +諏 +諒 +論 +諗 +諜 +諟 +諠 +諡 +諤 +諦 +諧 +諪 +諫 +諭 +諮 +諱 +諲 +諳 +諴 +諶 +諷 +諸 +諺 +諼 +諾 +謀 +謁 +謂 +謄 +謇 +謊 +謌 +謎 +謏 +謐 +謔 +謖 +謗 +謙 +謚 +講 +謜 +謝 +謠 +謢 +謤 +謨 +謩 +謫 +謬 +謳 +謹 +謾 +證 +譏 +譓 +譔 +識 +譙 +譚 +譜 +譞 +警 +譫 +譬 +譭 +譯 +議 +譲 +譳 +譴 +護 +譽 +譿 +讀 +讃 +變 +讌 +讎 +讓 +讖 +讙 +讚 +讜 +讞 +谷 +谿 +豁 +豆 +豇 +豈 +豉 +豊 +豌 +豎 +豐 +豔 +豕 +豚 +象 +豢 +豨 +豪 +豫 +豬 +豳 +豸 +豹 +豺 +豿 +貂 +貅 +貉 +貊 +貌 +貐 +貒 +貓 +貔 +貘 +貝 +貞 +負 +財 +貢 +貤 +貧 +貨 +販 +貪 +貫 +責 +貭 +貮 +貯 +貲 +貳 +貴 +貶 +買 +貸 +貺 +費 +貼 +貽 +貿 +賀 +賁 +賂 +賃 +賄 +資 +賈 +賊 +賑 +賒 +賓 +賔 +賕 +賚 +賜 +賞 +賠 +賡 +賢 +賣 +賤 +賦 +賨 +質 +賬 +賭 +賴 +賹 +賺 +賻 +購 +賽 +賾 +贄 +贅 +贇 +贈 +贊 +贌 +贍 +贏 +贓 +贔 +贖 +贛 +赤 +赦 +赧 +赫 +赬 +赭 +走 +赳 +赴 +起 +趁 +超 +越 +趐 +趕 +趖 +趙 +趟 +趣 +趨 +足 +趴 +趵 +趺 +趼 +趾 +跅 +跆 +跋 +跌 +跏 +跑 +跖 +跗 +跛 +距 +跟 +跡 +跣 +跤 +跨 +跩 +跪 +路 +跳 +踎 +踏 +踐 +踝 +踞 +踢 +踩 +踰 +踴 +踹 +踺 +蹂 +蹄 +蹇 +蹈 +蹉 +蹊 +蹋 +蹕 +蹙 +蹟 +蹠 +蹤 +蹦 +蹬 +蹭 +蹯 +蹲 +蹴 +蹶 +蹺 +蹻 +蹼 +躁 +躂 +躄 +躉 +躋 +躍 +躑 +躒 +躔 +躝 +躪 +身 +躬 +躰 +躲 +躺 +軀 +車 +軋 +軌 +軍 +軎 +軒 +軔 +軛 +軟 +転 +軫 +軲 +軸 +軹 +軺 +軻 +軼 +軽 +軾 +較 +輄 +輅 +載 +輋 +輒 +輓 +輔 +輕 +輛 +輝 +輞 +輟 +輥 +輦 +輩 +輪 +輬 +輭 +輯 +輶 +輸 +輻 +輾 +輿 +轀 +轂 +轄 +轅 +轆 +轉 +轍 +轎 +轘 +轝 +轟 +轤 +辛 +辜 +辟 +辣 +辦 +辧 +辨 +辭 +辮 +辯 +辰 +辱 +農 +辵 +辺 +辻 +込 +迂 +迄 +迅 +迎 +近 +返 +迢 +迤 +迥 +迦 +迪 +迫 +迭 +迮 +述 +迴 +迵 +迷 +迸 +迺 +追 +退 +送 +逃 +逄 +逅 +逆 +逈 +逋 +逌 +逍 +逎 +透 +逐 +逑 +途 +逕 +逖 +逗 +這 +通 +逛 +逝 +逞 +速 +造 +逢 +連 +逤 +逨 +逮 +逯 +進 +逴 +逵 +逸 +逹 +逺 +逼 +逾 +遁 +遂 +遄 +遇 +遊 +運 +遍 +過 +遏 +遐 +遒 +道 +達 +違 +遘 +遙 +遛 +遜 +遞 +遠 +遢 +遣 +遨 +適 +遭 +遮 +遯 +遲 +遴 +遵 +遶 +遷 +選 +遹 +遺 +遼 +避 +邀 +邁 +邂 +邃 +還 +邇 +邈 +邉 +邊 +邋 +邏 +邑 +邕 +邗 +邙 +邛 +邠 +邡 +邢 +那 +邦 +邨 +邪 +邯 +邰 +邱 +邲 +邳 +邴 +邵 +邸 +邽 +邾 +郁 +郃 +郄 +郅 +郇 +郊 +郋 +郎 +郗 +郛 +郜 +郝 +郞 +郟 +郡 +郢 +郤 +部 +郪 +郫 +郭 +郯 +郳 +郴 +郵 +郷 +都 +郾 +郿 +鄂 +鄃 +鄄 +鄆 +鄉 +鄋 +鄑 +鄒 +鄔 +鄖 +鄗 +鄘 +鄙 +鄚 +鄜 +鄞 +鄠 
+鄢 +鄣 +鄤 +鄧 +鄩 +鄫 +鄭 +鄯 +鄰 +鄱 +鄲 +鄳 +鄴 +鄺 +酃 +酆 +酈 +酉 +酊 +酋 +酌 +配 +酎 +酏 +酐 +酒 +酔 +酗 +酚 +酞 +酡 +酢 +酣 +酥 +酩 +酪 +酬 +酮 +酯 +酰 +酴 +酵 +酶 +酷 +酸 +酺 +酼 +醁 +醂 +醃 +醅 +醇 +醉 +醋 +醌 +醍 +醐 +醒 +醚 +醛 +醜 +醞 +醢 +醣 +醪 +醫 +醬 +醮 +醯 +醴 +醺 +醾 +醿 +釀 +釁 +釆 +采 +釉 +釋 +里 +重 +野 +量 +釐 +金 +釒 +釓 +釔 +釕 +釗 +釘 +釙 +釚 +釜 +針 +釣 +釤 +釦 +釧 +釩 +釪 +釭 +釴 +釵 +釷 +釹 +釺 +鈀 +鈁 +鈄 +鈇 +鈈 +鈉 +鈊 +鈍 +鈏 +鈐 +鈑 +鈔 +鈕 +鈖 +鈞 +鈢 +鈣 +鈥 +鈦 +鈫 +鈮 +鈰 +鈳 +鈴 +鈷 +鈸 +鈹 +鈺 +鈾 +鈿 +鉀 +鉄 +鉅 +鉆 +鉈 +鉉 +鉋 +鉌 +鉍 +鉏 +鉑 +鉓 +鉗 +鉚 +鉛 +鉞 +鉟 +鉤 +鉦 +鉬 +鉭 +鉲 +鉶 +鉷 +鉸 +鉻 +鉾 +鉿 +銀 +銂 +銃 +銅 +銋 +銍 +銑 +銓 +銕 +銖 +銘 +銚 +銜 +銠 +銣 +銥 +銦 +銨 +銩 +銪 +銫 +銬 +銭 +銱 +銲 +銳 +銶 +銷 +銹 +銻 +銼 +銾 +鋁 +鋅 +鋆 +鋇 +鋌 +鋏 +鋐 +鋒 +鋕 +鋗 +鋙 +鋡 +鋤 +鋥 +鋦 +鋨 +鋪 +鋮 +鋯 +鋰 +鋱 +鋳 +鋶 +鋸 +鋹 +鋼 +錀 +錄 +錏 +錐 +錒 +錕 +錘 +錚 +錞 +錟 +錠 +錡 +錢 +錦 +錨 +錫 +錬 +錮 +錯 +錳 +錶 +錸 +錻 +鍀 +鍇 +鍈 +鍉 +鍊 +鍋 +鍍 +鍏 +鍔 +鍘 +鍛 +鍝 +鍟 +鍠 +鍥 +鍩 +鍬 +鍱 +鍳 +鍵 +鍶 +鍷 +鍺 +鍼 +鍾 +鎂 +鎅 +鎊 +鎌 +鎏 +鎓 +鎔 +鎖 +鎗 +鎘 +鎚 +鎛 +鎢 +鎣 +鎦 +鎧 +鎪 +鎬 +鎭 +鎮 +鎰 +鎳 +鎵 +鎻 +鏃 +鏇 +鏈 +鏊 +鏌 +鏐 +鏑 +鏓 +鏖 +鏗 +鏘 +鏜 +鏝 +鏞 +鏟 +鏡 +鏢 +鏤 +鏦 +鏳 +鏴 +鏵 +鏷 +鏻 +鏽 +鐃 +鐇 +鐈 +鐓 +鐔 +鐘 +鐙 +鐠 +鐡 +鐤 +鐦 +鐧 +鐫 +鐬 +鐭 +鐮 +鐲 +鐳 +鐵 +鐸 +鐺 +鐽 +鐿 +鑀 +鑁 +鑂 +鑄 +鑅 +鑊 +鑌 +鑑 +鑒 +鑛 +鑠 +鑣 +鑨 +鑪 +鑫 +鑭 +鑰 +鑲 +鑴 +鑷 +鑼 +鑽 +鑾 +鑿 +長 +門 +閂 +閃 +閆 +閉 +開 +閎 +閏 +閑 +閒 +間 +閔 +閘 +閜 +閞 +閟 +関 +閣 +閥 +閦 +閨 +閩 +閬 +閭 +閰 +閱 +閶 +閹 +閻 +閼 +閾 +閿 +闆 +闇 +闈 +闊 +闋 +闌 +闍 +闐 +闓 +闔 +闕 +闖 +闘 +關 +闞 +闡 +闢 +闥 +阜 +阝 +阡 +阪 +阭 +阮 +阯 +阱 +防 +阻 +阿 +陀 +陁 +陂 +附 +陋 +陌 +降 +限 +陔 +陘 +陛 +陜 +陝 +陞 +陟 +陡 +院 +陣 +除 +陪 +陬 +陰 +陲 +陳 +陵 +陶 +陷 +陸 +険 +陽 +隄 +隅 +隆 +隈 +隊 +隋 +隍 +階 +隔 +隕 +隗 +隘 +隙 +際 +障 +隣 +隧 +隨 +險 +隰 +隱 +隲 +隳 +隴 +隷 +隸 +隹 +隻 +隼 +雀 +雁 +雄 +雅 +集 +雇 +雉 +雋 +雌 +雍 +雎 +雑 +雒 +雕 +雖 +雙 +雛 +雜 +雝 +雞 +離 +難 +雨 +雩 +雪 +雫 +雯 +雱 +雲 +零 +雷 +雹 +電 +需 +霄 +霅 +霆 +震 +霈 +霉 +霊 +霍 +霎 +霏 +霑 +霓 +霖 +霙 +霜 +霞 +霤 +霧 +霨 +霰 +露 +霶 +霸 +霹 +霽 +霾 +靁 +靂 +靄 +靈 +靉 +靑 +青 +靖 +靚 +靛 +靜 +非 +靠 +靡 +面 +革 +靫 +靬 +靭 +靳 +靴 +靶 +靺 +靼 +鞅 +鞆 +鞋 +鞍 +鞏 +鞘 +鞞 +鞠 +鞣 +鞥 +鞦 +鞨 +鞭 +鞮 +鞴 +韁 +韃 +韆 +韋 +韌 +韑 +韓 +韙 +韜 +韞 +韠 +韡 +韭 +韮 +音 +韶 +韺 +韻 +韾 +響 +頁 +頂 +頃 +項 +順 +須 +頊 +頌 +頍 +頎 +頏 +預 +頑 +頒 +頓 +頔 +頗 +領 +頜 +頠 +頡 +頤 +頦 +頫 +頭 +頰 +頴 +頵 +頷 +頸 +頹 +頻 +頼 +顆 +題 +額 +顎 +顏 +顒 +顓 +顔 +顕 +顗 +願 +顙 +顛 +類 +顥 +顧 +顫 +顯 +顰 +顱 +顳 +顴 +風 +颮 +颯 +颱 +颶 +颺 +颼 +飄 +飆 +飈 +飛 +食 +飠 +飡 +飢 +飥 +飩 +飪 +飫 +飬 +飭 +飮 +飯 +飲 +飴 +飼 +飽 +飾 +餃 +餄 +餅 +餉 +養 +餌 +餎 +餐 +餒 +餓 +餗 +餘 +餚 +餛 +餞 +餠 +餡 +館 +餮 +餵 +餺 +餾 +餿 +饃 +饅 +饋 +饌 +饑 +饒 +饕 +饗 +饞 +饟 +饢 +首 +馗 +馘 +香 +馛 +馥 +馦 +馨 +馬 +馭 +馮 +馯 +馱 +馳 +馴 +馼 +駁 +駄 +駅 +駆 +駐 +駑 +駒 +駔 +駕 +駘 +駙 +駛 +駝 +駟 +駢 +駭 +駰 +駱 +駿 +騁 +騂 +騄 +騅 +騋 +騎 +騏 +験 +騖 +騙 +騤 +騨 +騫 +騭 +騮 +騰 +騶 +騷 +騾 +驁 +驃 +驄 +驅 +驊 +驌 +驍 +驎 +驒 +驕 +驗 +驚 +驛 +驟 +驢 +驤 +驥 +驩 +驪 +骨 +骯 +骰 +骶 +骷 +骸 +骼 +髀 +髂 +髎 +髏 +髑 +髒 +髓 +體 +高 +髙 +髡 +髦 +髪 +髭 +髮 +髯 +髲 +髷 +髹 +髻 +鬃 +鬄 +鬅 +鬆 +鬍 +鬚 +鬟 +鬢 +鬣 +鬥 +鬧 +鬨 +鬩 +鬪 +鬬 +鬮 +鬯 +鬱 +鬲 +鬹 +鬻 +鬼 +魁 +魂 +魃 +魄 +魅 +魈 +魋 +魍 +魎 +魏 +魔 +魕 +魘 +魚 +魛 +魞 +魟 +魣 +魨 +魩 +魮 +魯 +魴 +魷 +鮀 +鮁 +鮃 +鮄 +鮊 +鮋 +鮍 +鮐 +鮑 +鮒 +鮓 +鮗 +鮜 +鮟 +鮠 +鮡 +鮣 +鮨 +鮪 +鮫 +鮭 +鮮 +鮰 +鮸 +鮹 +鮻 +鯀 +鯁 +鯃 +鯇 +鯉 +鯊 +鯏 +鯒 +鯓 +鯔 +鯕 +鯖 +鯗 +鯙 +鯛 +鯡 +鯢 +鯤 +鯧 +鯨 +鯪 +鯭 +鯮 +鯰 +鯶 +鯷 +鯻 +鯽 +鯿 +鰂 +鰃 +鰆 +鰈 +鰉 +鰍 +鰏 +鰒 +鰓 +鰕 +鰗 +鰛 +鰜 +鰟 +鰣 +鰤 +鰧 +鰨 +鰩 +鰭 +鰮 +鰱 +鰲 +鰳 +鰶 +鰷 +鰹 +鰺 +鰻 +鰼 +鰾 +鱀 +鱂 +鱅 +鱇 +鱈 +鱉 +鱊 +鱒 +鱓 +鱔 +鱖 +鱗 +鱘 +鱚 +鱝 +鱟 +鱠 +鱣 +鱥 +鱧 +鱨 +鱬 +鱮 +鱰 +鱲 +鱵 +鱷 +鱸 +鱺 +鱻 +鳥 +鳧 +鳩 +鳯 +鳰 +鳳 +鳴 +鳶 +鳽 +鴆 +鴇 +鴉 +鴒 +鴓 +鴕 +鴗 +鴛 +鴝 +鴞 +鴟 +鴡 +鴣 +鴦 +鴨 +鴫 +鴯 +鴰 +鴴 +鴻 +鴿 +鵂 +鵄 +鵎 +鵐 +鵑 +鵒 +鵓 +鵙 +鵜 +鵝 +鵞 +鵟 +鵠 +鵡 +鵪 +鵬 +鵯 +鵰 +鵲 +鵵 +鵼 +鵾 +鶆 +鶇 +鶉 +鶏 +鶒 +鶓 +鶘 +鶚 +鶡 +鶥 +鶩 +鶬 +鶯 +鶲 +鶴 +鶹 +鶺 +鶻 +鶼 +鶿 +鷂 +鷄 +鷉 +鷎 +鷓 +鷗 +鷙 +鷚 +鷟 +鷥 +鷦 +鷫 +鷯 +鷲 +鷳 +鷸 +鷹 +鷺 +鸊 +鸌 +鸐 +鸑 +鸕 +鸘 +鸚 +鸛 +鸜 +鸝 +鸞 +鹮 +鹵 +鹹 +鹼 +鹽 +鹿 +麂 +麅 +麇 +麈 +麊 +麋 +麐 +麒 +麓 +麗 +麝 +麞 +麟 +麥 +麩 +麪 +麯 +麴 +麵 +麹 +麺 +麻 +麼 +麽 +麾 +麿 +黁 +黃 +黇 +黌 +黍 +黎 +黏 +黐 +黑 +黒 +黔 +默 +黙 +黛 +黜 +黝 +點 +黟 +黥 +黧 +黨 +黯 +黴 +黶 +黻 +黼 +黽 +黿 +鼂 +鼇 +鼈 +鼉 +鼎 +鼐 +鼒 +鼓 +鼕 +鼙 +鼠 +鼢 +鼩 +鼬 +鼯 +鼱 +鼴 +鼷 +鼻 +鼽 +鼾 +齊 +齋 +齒 +齕 +齡 +齣 +齦 +齧 +齲 +齶 +龍 +龎 +龐 +龑 +龔 +龕 +龜 
+龝 +龠 +龢 +郎 +凉 +﹑ +﹗ +﹝ +﹞ +﹢ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +A +B +C +D +E +F +G +H +I +K +L +M +N +O +P +R +S +T +U +V +W +Y +Z +[ +] +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +r +s +t +u +z +{ +| +} +~ +¥ +𣇉 + diff --git a/modules/pytorchocr/utils/dict/chinese_cht_dict.txt b/modules/pytorchocr/utils/dict/chinese_cht_dict.txt new file mode 100644 index 0000000..cc1aa47 --- /dev/null +++ b/modules/pytorchocr/utils/dict/chinese_cht_dict.txt @@ -0,0 +1,8421 @@ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +¥ +® +° +± +² +´ +· +» +É +Ë +Ó +× +Ü +à +á +ä +è +é +ì +í +ò +ó +÷ +ú +ü +ā +ē +ī +ō +ū +ǐ +ǒ +ɔ +ɡ +ʌ +ˋ +Λ +Ο +Φ +Ω +α +β +ε +θ +μ +π +З +И +Й +П +Я +г +— +‖ +‘ +’ +“ +” +• +… +‧ +′ +″ +※ +℃ +№ +™ +Ⅱ +Ⅲ +Ⅳ +← +↑ +→ +↓ +⇋ +∈ +∑ +√ +∞ +∣ +∧ +∩ +∫ +∶ +≈ +≠ +≤ +≥ +⊙ +⊥ +① +② +③ +④ +⑧ +⑴ +⑵ +⑶ +─ +│ +┅ +┌ +├ +█ +▎ +▏ +▕ +■ +□ +▪ +▲ +△ +▼ +◆ +◇ +○ +◎ +● +◥ +★ +☆ +❋ +❤ +  +、 +。 +〇 +〉 +《 +》 +「 +」 +『 +』 +【 +】 +〔 +〕 +〖 +〗 +の +サ +シ +ジ +マ +ㄱ +ㆍ +㎏ +㎡ +㐂 +㐱 +㙟 +㴪 +㸃 +䖝 +䝉 +䰾 +䲁 +一 +丁 +七 +丄 +丈 +三 +上 +下 +丌 +不 +与 +丏 +丐 +丑 +且 +丕 +世 +丘 +丙 +丞 +丟 +両 +並 +丨 +丫 +中 +丰 +串 +丶 +丸 +丹 +主 +丼 +丿 +乂 +乃 +久 +么 +之 +乍 +乎 +乏 +乒 +乓 +乖 +乗 +乘 +乙 +乚 +乜 +九 +乞 +也 +乩 +乭 +乳 +乸 +乹 +乾 +亀 +亂 +亅 +了 +予 +亊 +事 +二 +亍 +云 +互 +亓 +五 +井 +亘 +些 +亜 +亞 +亟 +亠 +亡 +亢 +交 +亥 +亦 +亨 +享 +京 +亭 +亮 +亰 +亳 +亶 +亹 +人 +亻 +什 +仁 +仂 +仃 +仄 +仇 +仉 +今 +介 +仍 +仏 +仔 +仕 +他 +仗 +付 +仙 +仛 +仝 +仞 +仟 +仡 +代 +令 +以 +仨 +仫 +仮 +仰 +仲 +仳 +仵 +件 +仺 +任 +仼 +份 +仿 +企 +伃 +伈 +伉 +伊 +伋 +伍 +伎 +伏 +伐 +休 +伕 +伙 +伝 +伢 +伯 +估 +伱 +伴 +伶 +伷 +伸 +伺 +似 +伽 +伾 +佀 +佁 +佃 +但 +佇 +佈 +佉 +佋 +位 +低 +住 +佐 +佑 +体 +佔 +何 +佗 +佘 +余 +佚 +佛 +作 +佝 +佞 +佟 +你 +佣 +佤 +佧 +佩 +佬 +佯 +佰 +佳 +併 +佶 +佹 +佺 +佼 +佾 +使 +侁 +侃 +侄 +侅 +來 +侈 +侊 +例 +侍 +侏 +侑 +侖 +侗 +侘 +侚 +供 +依 +侞 +価 +侮 +侯 +侵 +侶 +侷 +侹 +便 +俁 +係 +促 +俄 +俅 +俊 +俋 +俌 +俍 +俎 +俏 +俐 +俑 +俗 +俘 +俚 +俛 +保 +俞 +俟 +俠 +信 +俬 +修 +俯 +俱 +俳 +俴 +俵 +俶 +俸 +俺 +俽 +俾 +倆 +倈 +倉 +個 +倌 +倍 +們 +倒 +倓 +倔 +倖 +倗 +倘 +候 +倚 +倜 +倞 +借 +倡 +倢 +倣 +値 +倦 +倧 +倩 +倪 +倫 +倬 +倭 +倮 +倻 +值 +偁 +偃 +假 +偈 +偉 +偊 +偌 +偍 +偎 +偏 +偓 +偕 +做 +停 +健 +偪 +偲 +側 +偵 +偶 +偷 +偸 +偽 +傀 +傃 +傅 +傈 +傉 +傍 +傑 +傒 +傕 +傖 +傘 +備 +傜 +傢 +傣 +催 +傭 +傲 +傳 +債 +傷 +傻 +傾 +僅 +僉 +僊 +働 +像 +僑 +僔 +僕 +僖 +僙 +僚 +僜 +僡 +僧 +僩 +僭 +僮 +僰 +僱 +僳 +僴 +僵 +價 +僻 +儀 +儁 +儂 +億 +儆 +儇 +儈 +儉 +儋 +儐 +儒 +儔 +儕 +儘 +儚 +儞 +償 +儡 +儥 +儦 +優 +儫 +儱 +儲 +儷 +儺 +儻 +儼 +兀 +允 +元 +兄 +充 +兆 +先 +光 +克 +兌 +免 +児 +兒 +兔 +兕 +兗 +兜 +入 +內 +全 +兩 +兪 +八 +公 +六 +兮 +共 +兵 +其 +具 +典 +兼 +兿 +冀 +冂 +円 +冇 +冉 +冊 +再 +冏 +冑 +冒 +冕 +冖 +冗 +冚 +冠 +冢 +冤 +冥 +冧 +冨 +冪 +冫 +冬 +冮 +冰 +冴 +冶 +冷 +冼 +冽 +凃 +凄 +准 +凈 +凋 +凌 +凍 +凖 +凜 +凝 +凞 +几 +凡 +処 +凪 +凬 +凰 +凱 +凳 +凵 +凶 +凸 +凹 +出 +函 +刀 +刁 +刂 +刃 +刄 +分 +切 +刈 +刊 +刎 +刑 +划 +列 +初 +判 +別 +刦 +刧 +刨 +利 +刪 +刮 +到 +制 +刷 +券 +刺 +刻 +刼 +剁 +剃 +則 +削 +剋 +剌 +前 +剎 +剏 +剔 +剖 +剛 +剝 +剡 +剣 +剩 +剪 +剮 +副 +割 +創 +剿 +劃 +劄 +劇 +劈 +劉 +劊 +劌 +劍 +劑 +劔 +力 +功 +加 +劣 +助 +努 +劫 +劬 +劭 +劵 +効 +劼 +劾 +勁 +勃 +勅 +勇 +勉 +勐 +勑 +勒 +勔 +動 +勖 +勗 +勘 +務 +勛 +勝 +勞 +募 +勢 +勣 +勤 +勦 +勰 +勱 +勲 +勳 +勵 +勷 +勸 +勺 +勻 +勾 +勿 +匂 +匄 +包 +匆 +匈 +匋 +匍 +匏 +匐 +匕 +化 +北 +匙 +匚 +匝 +匠 +匡 +匣 +匪 +匯 +匱 +匸 +匹 +匾 +匿 +區 +十 +千 +卅 +升 +午 +卉 +半 +卋 +卍 +卐 +卑 +卒 +卓 +協 +南 +博 +卜 +卞 +卟 +占 +卡 +卣 +卦 +卧 +卩 +卬 +卮 +卯 +印 +危 +卲 +即 +卵 +卷 +卸 +卹 +卺 +卻 +卽 +卿 +厄 +厓 +厔 +厙 +厚 +厝 +原 +厥 +厭 +厰 +厲 +厴 +厶 +去 +參 +叄 +又 +叉 +及 +友 +反 +収 +叔 +叕 +取 +受 +叛 +叟 +叡 +叢 +口 +古 +句 +另 +叨 +叩 +只 +叫 +召 +叭 +叮 +可 +台 +叱 +史 +右 +叵 +司 +叻 +叼 +吁 +吃 +各 +吆 +合 +吉 +吊 +吋 +同 +名 +后 +吏 +吐 +向 +吒 +吔 +吖 +君 +吝 +吞 +吟 +吠 +吡 +吥 +否 +吧 +吩 +含 +吮 +吱 +吲 +吳 +吵 +吶 +吸 +吹 +吻 +吼 +吾 +呀 +呂 +呃 +呈 +呉 +告 +呋 +呎 +呢 +呤 +呦 +周 +呱 +味 
+呵 +呷 +呸 +呼 +命 +呾 +咀 +咁 +咂 +咄 +咅 +咆 +咋 +和 +咎 +咑 +咒 +咔 +咕 +咖 +咗 +咘 +咚 +咟 +咤 +咥 +咧 +咨 +咩 +咪 +咫 +咬 +咭 +咯 +咱 +咲 +咳 +咸 +咻 +咼 +咽 +咾 +咿 +哀 +品 +哂 +哄 +哆 +哇 +哈 +哉 +哌 +哎 +哏 +哐 +哖 +哚 +哞 +員 +哥 +哦 +哨 +哩 +哪 +哭 +哮 +哱 +哲 +哺 +哼 +唃 +唄 +唆 +唇 +唉 +唏 +唐 +唑 +唔 +唘 +唧 +唫 +唬 +唭 +售 +唯 +唱 +唳 +唵 +唷 +唸 +唻 +唾 +啁 +啃 +啄 +商 +啉 +啊 +啍 +問 +啓 +啖 +啚 +啜 +啞 +啟 +啡 +啣 +啤 +啥 +啦 +啪 +啫 +啯 +啰 +啱 +啲 +啵 +啶 +啷 +啻 +啼 +啾 +喀 +喂 +喃 +善 +喆 +喇 +喈 +喉 +喊 +喋 +喏 +喔 +喘 +喙 +喚 +喜 +喝 +喢 +喦 +喧 +喪 +喫 +喬 +單 +喰 +喱 +喲 +喳 +喵 +喹 +喻 +喼 +嗄 +嗅 +嗆 +嗇 +嗊 +嗎 +嗑 +嗒 +嗓 +嗔 +嗖 +嗚 +嗜 +嗝 +嗞 +嗡 +嗢 +嗣 +嗦 +嗨 +嗩 +嗪 +嗮 +嗯 +嗲 +嗶 +嗹 +嗽 +嘀 +嘅 +嘆 +嘉 +嘌 +嘍 +嘎 +嘏 +嘔 +嘗 +嘚 +嘛 +嘜 +嘞 +嘟 +嘢 +嘣 +嘥 +嘧 +嘩 +嘬 +嘮 +嘯 +嘰 +嘲 +嘴 +嘶 +嘸 +嘹 +嘻 +嘿 +噁 +噌 +噍 +噏 +噓 +噗 +噝 +噠 +噢 +噤 +噥 +噦 +器 +噩 +噪 +噬 +噯 +噰 +噲 +噴 +噶 +噸 +噹 +噻 +嚇 +嚈 +嚎 +嚏 +嚐 +嚒 +嚓 +嚕 +嚗 +嚙 +嚞 +嚟 +嚤 +嚦 +嚧 +嚨 +嚩 +嚮 +嚳 +嚴 +嚶 +嚷 +嚼 +嚿 +囀 +囂 +囃 +囉 +囊 +囍 +囑 +囒 +囓 +囗 +囚 +四 +囝 +回 +因 +囡 +団 +囤 +囧 +囪 +囮 +囯 +困 +囲 +図 +囶 +囷 +囹 +固 +囿 +圂 +圃 +圄 +圈 +圉 +國 +圍 +圏 +園 +圓 +圖 +圗 +團 +圜 +土 +圧 +在 +圩 +圪 +圭 +圯 +地 +圳 +圻 +圾 +址 +均 +坊 +坋 +坌 +坍 +坎 +坐 +坑 +坖 +坡 +坣 +坤 +坦 +坨 +坩 +坪 +坫 +坬 +坭 +坮 +坯 +坳 +坵 +坶 +坷 +坻 +垂 +垃 +垈 +型 +垍 +垓 +垕 +垚 +垛 +垞 +垟 +垠 +垢 +垣 +垮 +垯 +垰 +垵 +垸 +垻 +垿 +埃 +埅 +埇 +埈 +埋 +埌 +城 +埏 +埒 +埔 +埕 +埗 +埜 +域 +埠 +埡 +埤 +埧 +埨 +埪 +埭 +埮 +埴 +埵 +執 +培 +基 +埻 +埼 +堀 +堂 +堃 +堅 +堆 +堇 +堈 +堉 +堊 +堍 +堖 +堝 +堡 +堤 +堦 +堪 +堮 +堯 +堰 +報 +場 +堵 +堷 +堺 +塀 +塅 +塆 +塊 +塋 +塌 +塍 +塏 +塑 +塔 +塗 +塘 +塙 +塜 +塞 +塡 +塢 +塤 +塨 +塩 +填 +塬 +塭 +塰 +塱 +塲 +塵 +塹 +塽 +塾 +墀 +境 +墅 +墉 +墊 +墎 +墓 +増 +墘 +墜 +增 +墟 +墡 +墣 +墨 +墩 +墫 +墬 +墮 +墱 +墳 +墺 +墼 +墾 +壁 +壄 +壆 +壇 +壋 +壌 +壎 +壐 +壑 +壓 +壔 +壕 +壘 +壙 +壞 +壟 +壠 +壢 +壤 +壩 +士 +壬 +壯 +壱 +壴 +壹 +壺 +壽 +夀 +夆 +変 +夊 +夋 +夌 +夏 +夔 +夕 +外 +夙 +多 +夜 +夠 +夢 +夤 +夥 +大 +天 +太 +夫 +夬 +夭 +央 +夯 +失 +夷 +夾 +奀 +奄 +奇 +奈 +奉 +奎 +奏 +奐 +契 +奓 +奔 +奕 +套 +奘 +奚 +奠 +奢 +奣 +奧 +奩 +奪 +奫 +奭 +奮 +女 +奴 +奶 +她 +好 +妀 +妁 +如 +妃 +妄 +妊 +妍 +妏 +妑 +妒 +妓 +妖 +妙 +妝 +妞 +妠 +妤 +妥 +妧 +妨 +妭 +妮 +妯 +妲 +妳 +妸 +妹 +妺 +妻 +妾 +姀 +姁 +姃 +姆 +姈 +姉 +姊 +始 +姌 +姍 +姐 +姑 +姒 +姓 +委 +姚 +姜 +姝 +姣 +姥 +姦 +姨 +姪 +姫 +姬 +姮 +姵 +姶 +姸 +姻 +姿 +威 +娃 +娉 +娋 +娌 +娍 +娎 +娑 +娖 +娘 +娛 +娜 +娟 +娠 +娣 +娥 +娩 +娫 +娳 +娶 +娸 +娼 +娽 +婀 +婁 +婆 +婉 +婊 +婑 +婕 +婚 +婢 +婦 +婧 +婪 +婭 +婯 +婷 +婺 +婻 +婼 +婿 +媃 +媄 +媊 +媐 +媒 +媓 +媖 +媗 +媚 +媛 +媜 +媞 +媧 +媭 +媯 +媲 +媳 +媺 +媼 +媽 +媾 +媿 +嫁 +嫂 +嫄 +嫈 +嫉 +嫌 +嫖 +嫘 +嫚 +嫡 +嫣 +嫦 +嫩 +嫪 +嫲 +嫳 +嫵 +嫺 +嫻 +嬅 +嬈 +嬉 +嬋 +嬌 +嬗 +嬛 +嬝 +嬡 +嬤 +嬨 +嬪 +嬬 +嬭 +嬰 +嬴 +嬸 +嬾 +嬿 +孀 +孃 +孆 +孋 +孌 +子 +孑 +孔 +孕 +孖 +字 +存 +孚 +孛 +孜 +孝 +孟 +孢 +季 +孤 +孩 +孫 +孬 +孮 +孰 +孳 +孵 +學 +孺 +孻 +孽 +孿 +宀 +它 +宅 +宇 +守 +安 +宋 +完 +宍 +宏 +宓 +宕 +宗 +官 +宙 +定 +宛 +宜 +実 +客 +宣 +室 +宥 +宦 +宧 +宮 +宰 +害 +宴 +宵 +家 +宸 +容 +宿 +寀 +寁 +寂 +寄 +寅 +密 +寇 +寈 +寊 +富 +寐 +寒 +寓 +寔 +寕 +寖 +寗 +寘 +寛 +寜 +寞 +察 +寡 +寢 +寤 +寥 +實 +寧 +寨 +審 +寫 +寬 +寮 +寯 +寰 +寳 +寵 +寶 +寸 +寺 +対 +封 +専 +尃 +射 +將 +專 +尉 +尊 +尋 +對 +導 +小 +尐 +少 +尓 +尕 +尖 +尗 +尙 +尚 +尢 +尤 +尨 +尪 +尬 +就 +尷 +尹 +尺 +尻 +尼 +尾 +尿 +局 +屁 +屄 +居 +屆 +屇 +屈 +屋 +屌 +屍 +屎 +屏 +屐 +屑 +屓 +展 +屚 +屜 +屠 +屢 +層 +履 +屬 +屭 +屯 +山 +屹 +屺 +屻 +岀 +岈 +岌 +岐 +岑 +岔 +岡 +岢 +岣 +岧 +岩 +岪 +岫 +岬 +岰 +岱 +岳 +岵 +岷 +岸 +岻 +峁 +峅 +峇 +峋 +峍 +峒 +峘 +峙 +峚 +峠 +峨 +峩 +峪 +峭 +峯 +峰 +峴 +島 +峻 +峼 +峽 +崁 +崆 +崇 +崈 +崋 +崍 +崎 +崐 +崑 +崒 +崔 +崖 +崗 +崘 +崙 +崚 +崛 +崞 +崟 +崠 +崢 +崤 +崧 +崩 +崬 +崮 +崱 +崴 +崵 +崶 +崽 +嵇 +嵊 +嵋 +嵌 +嵎 +嵐 +嵒 +嵕 +嵖 +嵗 +嵙 +嵛 +嵜 +嵨 +嵩 +嵬 +嵮 +嵯 +嵰 +嵴 +嵻 +嵿 +嶁 +嶂 +嶃 +嶄 +嶇 +嶋 +嶌 +嶍 +嶒 +嶔 +嶗 +嶝 +嶠 +嶢 +嶦 +嶧 +嶪 +嶬 +嶰 +嶲 +嶴 +嶷 +嶸 +嶺 +嶼 +嶽 +巂 +巄 +巆 +巋 +巌 +巍 +巎 +巑 +巒 +巔 +巖 +巘 +巛 +川 +州 +巡 +巢 +工 +左 +巧 +巨 +巫 +差 +巰 +己 +已 +巳 +巴 +巶 +巷 +巻 +巽 +巾 +巿 +市 +布 +帆 +希 +帑 +帔 +帕 +帖 +帘 +帙 +帚 +帛 +帝 +帡 +帢 +帥 +師 +席 +帯 +帰 +帳 +帶 +帷 +常 +帽 +幀 +幃 +幄 +幅 +幌 +幔 +幕 +幗 +幚 +幛 +幟 +幡 +幢 +幣 +幪 +幫 +干 +平 +年 +幵 +幷 +幸 +幹 +幺 +幻 +幼 +幽 +幾 +庀 +庁 +広 +庇 +床 +序 +底 +庖 +店 +庚 +府 +庠 +庢 +庥 +度 +座 +庫 +庭 +庲 +庵 +庶 +康 +庸 +庹 +庼 +庾 +廁 +廂 +廄 +廆 +廈 +廉 +廊 +廋 +廌 +廍 +廑 +廓 +廔 +廕 +廖 +廙 +廚 +廝 +廞 +廟 +廠 +廡 +廢 +廣 +廧 +廨 +廩 +廬 +廰 +廱 +廳 +延 +廷 +廸 +建 +廻 +廼 +廿 +弁 +弄 +弅 +弇 +弈 +弉 +弊 +弋 +弍 +式 +弐 +弒 +弓 +弔 +引 +弖 +弗 +弘 +弛 +弟 +弢 +弦 +弧 
+弨 +弩 +弭 +弱 +張 +強 +弸 +弼 +弾 +彀 +彄 +彅 +彆 +彈 +彊 +彌 +彎 +彐 +彔 +彖 +彗 +彘 +彙 +彜 +彞 +彠 +彡 +形 +彣 +彤 +彥 +彧 +彩 +彪 +彫 +彬 +彭 +彰 +影 +彳 +彷 +役 +彼 +彿 +往 +征 +徂 +待 +徇 +很 +徉 +徊 +律 +後 +徐 +徑 +徒 +得 +徘 +徙 +徜 +從 +徠 +御 +徧 +徨 +復 +循 +徫 +徬 +徭 +微 +徳 +徴 +徵 +德 +徸 +徹 +徽 +心 +忄 +必 +忉 +忌 +忍 +忐 +忑 +忒 +志 +忘 +忙 +応 +忝 +忞 +忠 +快 +忬 +忯 +忱 +忳 +念 +忻 +忽 +忿 +怍 +怎 +怒 +怕 +怖 +怙 +怛 +思 +怠 +怡 +急 +怦 +性 +怨 +怪 +怯 +怵 +恁 +恂 +恃 +恆 +恊 +恍 +恐 +恕 +恙 +恢 +恣 +恤 +恥 +恨 +恩 +恪 +恬 +恭 +息 +恰 +恵 +恿 +悄 +悅 +悆 +悉 +悌 +悍 +悔 +悖 +悚 +悛 +悝 +悞 +悟 +悠 +患 +悧 +您 +悪 +悰 +悲 +悳 +悵 +悶 +悸 +悼 +情 +惆 +惇 +惑 +惔 +惕 +惘 +惚 +惜 +惟 +惠 +惡 +惣 +惦 +惰 +惱 +惲 +想 +惶 +惹 +惺 +愁 +愃 +愆 +愈 +愉 +愍 +意 +愐 +愒 +愔 +愕 +愚 +愛 +愜 +感 +愣 +愧 +愨 +愫 +愭 +愴 +愷 +愼 +愾 +愿 +慄 +慈 +態 +慌 +慎 +慕 +慘 +慚 +慜 +慟 +慢 +慣 +慥 +慧 +慨 +慮 +慰 +慳 +慵 +慶 +慷 +慾 +憂 +憊 +憋 +憍 +憎 +憐 +憑 +憓 +憕 +憙 +憚 +憤 +憧 +憨 +憩 +憫 +憬 +憲 +憶 +憺 +憻 +憾 +懂 +懃 +懇 +懈 +應 +懋 +懌 +懍 +懐 +懣 +懦 +懮 +懲 +懵 +懶 +懷 +懸 +懺 +懼 +懽 +懾 +懿 +戀 +戇 +戈 +戊 +戌 +戍 +戎 +成 +我 +戒 +戔 +戕 +或 +戙 +戚 +戛 +戟 +戡 +戢 +戥 +戦 +戩 +截 +戮 +戰 +戱 +戲 +戳 +戴 +戶 +戸 +戻 +戽 +戾 +房 +所 +扁 +扆 +扇 +扈 +扉 +手 +扌 +才 +扎 +扒 +打 +扔 +托 +扙 +扛 +扞 +扣 +扥 +扦 +扭 +扮 +扯 +扳 +扶 +批 +扼 +找 +承 +技 +抃 +抄 +抇 +抉 +把 +抑 +抒 +抓 +投 +抖 +抗 +折 +抦 +披 +抬 +抱 +抵 +抹 +抻 +押 +抽 +抿 +拂 +拆 +拇 +拈 +拉 +拋 +拌 +拍 +拎 +拏 +拐 +拒 +拓 +拔 +拖 +拗 +拘 +拙 +拚 +招 +拜 +拝 +拡 +括 +拭 +拮 +拯 +拱 +拳 +拴 +拷 +拺 +拼 +拽 +拾 +拿 +持 +指 +按 +挎 +挑 +挖 +挙 +挨 +挪 +挫 +振 +挲 +挵 +挹 +挺 +挻 +挾 +捂 +捆 +捉 +捌 +捍 +捎 +捏 +捐 +捒 +捕 +捜 +捦 +捧 +捨 +捩 +捫 +捭 +捱 +捲 +捶 +捷 +捺 +捻 +掀 +掂 +掃 +掄 +掇 +授 +掉 +掌 +掏 +掐 +排 +掖 +掘 +掙 +掛 +掞 +掟 +掠 +採 +探 +掣 +接 +控 +推 +掩 +措 +掬 +掰 +掾 +揀 +揄 +揆 +揉 +揍 +描 +提 +插 +揔 +揖 +揚 +換 +握 +揪 +揭 +揮 +援 +揸 +揺 +損 +搏 +搐 +搓 +搔 +搖 +搗 +搜 +搞 +搠 +搢 +搪 +搬 +搭 +搳 +搴 +搵 +搶 +搽 +搾 +摂 +摒 +摔 +摘 +摜 +摞 +摟 +摠 +摧 +摩 +摭 +摯 +摳 +摴 +摵 +摶 +摸 +摹 +摺 +摻 +摽 +撃 +撇 +撈 +撐 +撒 +撓 +撕 +撖 +撙 +撚 +撞 +撣 +撤 +撥 +撩 +撫 +撬 +播 +撮 +撰 +撲 +撳 +撻 +撼 +撾 +撿 +擀 +擁 +擂 +擅 +擇 +擊 +擋 +操 +擎 +擒 +擔 +擘 +據 +擠 +擢 +擥 +擦 +擬 +擯 +擰 +擱 +擲 +擴 +擷 +擺 +擼 +擾 +攀 +攏 +攔 +攖 +攘 +攜 +攝 +攞 +攢 +攣 +攤 +攪 +攫 +攬 +支 +攴 +攵 +收 +攷 +攸 +改 +攻 +攽 +放 +政 +故 +效 +敍 +敎 +敏 +救 +敔 +敕 +敖 +敗 +敘 +教 +敝 +敞 +敟 +敢 +散 +敦 +敫 +敬 +敭 +敲 +整 +敵 +敷 +數 +敻 +敾 +斂 +斃 +文 +斌 +斎 +斐 +斑 +斕 +斖 +斗 +料 +斛 +斜 +斝 +斟 +斡 +斤 +斥 +斧 +斬 +斯 +新 +斷 +方 +於 +施 +斿 +旁 +旂 +旃 +旄 +旅 +旉 +旋 +旌 +旎 +族 +旖 +旗 +旙 +旛 +旡 +既 +日 +旦 +旨 +早 +旬 +旭 +旱 +旲 +旳 +旺 +旻 +旼 +旽 +旾 +旿 +昀 +昂 +昃 +昆 +昇 +昉 +昊 +昌 +昍 +明 +昏 +昐 +易 +昔 +昕 +昚 +昛 +昜 +昝 +昞 +星 +映 +昡 +昣 +昤 +春 +昧 +昨 +昪 +昫 +昭 +是 +昰 +昱 +昴 +昵 +昶 +昺 +晁 +時 +晃 +晈 +晉 +晊 +晏 +晗 +晙 +晚 +晛 +晝 +晞 +晟 +晤 +晦 +晧 +晨 +晩 +晪 +晫 +晭 +普 +景 +晰 +晳 +晴 +晶 +晷 +晸 +智 +晾 +暃 +暄 +暅 +暇 +暈 +暉 +暊 +暌 +暎 +暏 +暐 +暑 +暕 +暖 +暗 +暘 +暝 +暟 +暠 +暢 +暦 +暨 +暫 +暮 +暱 +暲 +暴 +暸 +暹 +暻 +暾 +曄 +曅 +曆 +曇 +曉 +曌 +曔 +曖 +曙 +曜 +曝 +曠 +曦 +曧 +曨 +曩 +曬 +曮 +曰 +曲 +曳 +更 +曶 +曷 +書 +曹 +曺 +曼 +曽 +曾 +替 +最 +會 +月 +有 +朊 +朋 +服 +朏 +朐 +朓 +朔 +朕 +朖 +朗 +望 +朝 +期 +朦 +朧 +木 +未 +末 +本 +札 +朱 +朴 +朵 +朶 +朽 +朿 +杁 +杉 +杋 +杌 +李 +杏 +材 +村 +杓 +杖 +杙 +杜 +杞 +束 +杠 +杣 +杤 +杧 +杬 +杭 +杯 +東 +杲 +杳 +杴 +杵 +杷 +杻 +杼 +松 +板 +极 +枇 +枉 +枋 +枏 +析 +枕 +枖 +林 +枚 +枛 +果 +枝 +枠 +枡 +枯 +枰 +枱 +枲 +枳 +架 +枷 +枸 +枹 +枼 +柁 +柃 +柄 +柉 +柊 +柎 +柏 +某 +柑 +柒 +染 +柔 +柘 +柚 +柜 +柝 +柞 +柟 +查 +柩 +柬 +柯 +柰 +柱 +柳 +柴 +柵 +柶 +柷 +査 +柾 +柿 +栃 +栄 +栐 +栒 +栓 +栜 +栝 +栞 +校 +栢 +栨 +栩 +株 +栲 +栴 +核 +根 +栻 +格 +栽 +桀 +桁 +桂 +桃 +桄 +桅 +框 +案 +桉 +桌 +桎 +桐 +桑 +桓 +桔 +桕 +桖 +桙 +桜 +桝 +桫 +桱 +桲 +桴 +桶 +桷 +桼 +桿 +梀 +梁 +梂 +梃 +梅 +梆 +梉 +梏 +梓 +梔 +梗 +梘 +條 +梟 +梠 +梢 +梣 +梧 +梨 +梫 +梭 +梯 +械 +梱 +梳 +梵 +梶 +梽 +棄 +棆 +棉 +棋 +棍 +棐 +棒 +棓 +棕 +棖 +棗 +棘 +棚 +棛 +棟 +棠 +棡 +棣 +棧 +棨 +棩 +棪 +棫 +森 +棱 +棲 +棵 +棶 +棹 +棺 +棻 +棼 +棽 +椅 +椆 +椇 +椋 +植 +椎 +椏 +椒 +椙 +椥 +椪 +椰 +椲 +椴 +椵 +椹 +椽 +椿 +楂 +楊 +楓 +楔 +楗 +楙 +楚 +楝 +楞 +楠 +楡 +楢 +楣 +楤 +楦 +楧 +楨 +楫 +業 +楮 +楯 +楳 +極 +楷 +楸 +楹 +楽 +楿 +概 +榆 +榊 +榍 +榎 +榑 +榔 +榕 +榖 +榗 +榘 +榛 +榜 +榞 +榢 +榣 +榤 +榦 +榧 +榨 +榫 +榭 +榮 +榲 +榴 +榷 +榻 +榿 +槀 +槁 +槃 +槊 +構 +槌 +槍 +槎 +槐 +槓 +槔 +槗 +様 +槙 +槤 +槩 +槭 +槰 +槱 +槲 +槳 +槺 +槻 +槼 +槽 +槿 +樀 +樁 +樂 +樅 +樆 +樊 +樋 +樑 +樓 +樗 +樘 +標 +樞 +樟 +模 +樣 +樨 +権 +樫 +樵 +樸 +樹 +樺 +樻 +樽 +樾 +橄 +橇 +橈 +橋 
+橐 +橒 +橓 +橘 +橙 +橚 +機 +橡 +橢 +橪 +橫 +橿 +檀 +檄 +檇 +檉 +檊 +檎 +檐 +檔 +檗 +檜 +檞 +檠 +檡 +檢 +檣 +檦 +檨 +檫 +檬 +檯 +檳 +檵 +檸 +檻 +檽 +櫂 +櫃 +櫆 +櫈 +櫓 +櫚 +櫛 +櫞 +櫟 +櫥 +櫨 +櫪 +櫱 +櫸 +櫻 +櫾 +櫿 +欄 +欉 +權 +欏 +欒 +欖 +欞 +欠 +次 +欣 +欥 +欲 +欸 +欹 +欺 +欽 +款 +歆 +歇 +歉 +歊 +歌 +歎 +歐 +歓 +歙 +歛 +歡 +止 +正 +此 +步 +武 +歧 +歩 +歪 +歲 +歳 +歴 +歷 +歸 +歹 +死 +歿 +殂 +殃 +殄 +殆 +殉 +殊 +殑 +殖 +殘 +殛 +殞 +殟 +殤 +殭 +殮 +殯 +殲 +殳 +段 +殷 +殺 +殻 +殼 +殿 +毀 +毅 +毆 +毉 +毋 +毌 +母 +毎 +每 +毐 +毒 +毓 +比 +毖 +毗 +毘 +毛 +毫 +毬 +毯 +毴 +毸 +毽 +毿 +氂 +氈 +氍 +氏 +氐 +民 +氓 +氖 +気 +氘 +氙 +氚 +氛 +氟 +氣 +氦 +氧 +氨 +氪 +氫 +氬 +氮 +氯 +氰 +水 +氵 +氷 +永 +氹 +氻 +氽 +氾 +汀 +汁 +求 +汊 +汎 +汐 +汕 +汗 +汛 +汜 +汝 +汞 +江 +池 +污 +汧 +汨 +汩 +汪 +汭 +汰 +汲 +汴 +汶 +決 +汽 +汾 +沁 +沂 +沃 +沄 +沅 +沆 +沇 +沈 +沉 +沌 +沍 +沏 +沐 +沒 +沓 +沔 +沖 +沘 +沙 +沚 +沛 +沜 +沢 +沨 +沫 +沭 +沮 +沯 +沱 +河 +沸 +油 +沺 +治 +沼 +沽 +沾 +沿 +況 +泂 +泄 +泆 +泇 +泉 +泊 +泌 +泐 +泓 +泔 +法 +泖 +泗 +泚 +泛 +泠 +泡 +波 +泣 +泥 +泩 +泫 +泮 +泯 +泰 +泱 +泳 +泵 +洄 +洋 +洌 +洎 +洗 +洙 +洛 +洞 +洢 +洣 +洤 +津 +洨 +洩 +洪 +洮 +洱 +洲 +洳 +洵 +洸 +洹 +洺 +活 +洽 +派 +流 +浄 +浙 +浚 +浛 +浜 +浞 +浟 +浠 +浡 +浣 +浤 +浥 +浦 +浩 +浪 +浮 +浯 +浴 +浵 +海 +浸 +浹 +涅 +涇 +消 +涉 +涌 +涎 +涑 +涓 +涔 +涕 +涙 +涪 +涫 +涮 +涯 +液 +涵 +涸 +涼 +涿 +淄 +淅 +淆 +淇 +淋 +淌 +淍 +淎 +淏 +淑 +淓 +淖 +淘 +淙 +淚 +淛 +淝 +淞 +淠 +淡 +淤 +淥 +淦 +淨 +淩 +淪 +淫 +淬 +淮 +淯 +淰 +深 +淳 +淵 +淶 +混 +淸 +淹 +淺 +添 +淼 +淽 +渃 +清 +済 +渉 +渋 +渕 +渙 +渚 +減 +渝 +渟 +渠 +渡 +渣 +渤 +渥 +渦 +渫 +測 +渭 +港 +渲 +渴 +游 +渺 +渼 +渽 +渾 +湃 +湄 +湉 +湊 +湍 +湓 +湔 +湖 +湘 +湛 +湜 +湞 +湟 +湣 +湥 +湧 +湫 +湮 +湯 +湳 +湴 +湼 +満 +溁 +溇 +溈 +溉 +溋 +溎 +溏 +源 +準 +溙 +溜 +溝 +溟 +溢 +溥 +溦 +溧 +溪 +溫 +溯 +溱 +溲 +溴 +溵 +溶 +溺 +溼 +滀 +滁 +滂 +滄 +滅 +滇 +滈 +滉 +滋 +滌 +滎 +滏 +滑 +滓 +滔 +滕 +滘 +滙 +滝 +滬 +滯 +滲 +滴 +滷 +滸 +滹 +滻 +滽 +滾 +滿 +漁 +漂 +漆 +漇 +漈 +漎 +漏 +漓 +演 +漕 +漚 +漠 +漢 +漣 +漩 +漪 +漫 +漬 +漯 +漱 +漲 +漳 +漴 +漵 +漷 +漸 +漼 +漾 +漿 +潁 +潑 +潔 +潘 +潛 +潞 +潟 +潢 +潤 +潭 +潮 +潯 +潰 +潲 +潺 +潼 +潽 +潾 +潿 +澀 +澁 +澂 +澄 +澆 +澇 +澈 +澉 +澋 +澌 +澍 +澎 +澔 +澗 +澠 +澡 +澣 +澤 +澥 +澧 +澪 +澮 +澯 +澱 +澳 +澶 +澹 +澻 +激 +濁 +濂 +濃 +濉 +濊 +濋 +濕 +濘 +濙 +濛 +濞 +濟 +濠 +濡 +濤 +濫 +濬 +濮 +濯 +濰 +濱 +濲 +濶 +濺 +濼 +濾 +瀁 +瀅 +瀆 +瀉 +瀍 +瀏 +瀑 +瀔 +瀕 +瀘 +瀚 +瀛 +瀝 +瀞 +瀟 +瀠 +瀣 +瀦 +瀧 +瀨 +瀬 +瀰 +瀲 +瀴 +瀶 +瀹 +瀾 +灃 +灊 +灌 +灑 +灘 +灝 +灞 +灡 +灣 +灤 +灧 +火 +灰 +灴 +灸 +灼 +災 +炁 +炅 +炆 +炊 +炎 +炒 +炔 +炕 +炘 +炙 +炟 +炣 +炤 +炫 +炬 +炭 +炮 +炯 +炱 +炲 +炳 +炷 +炸 +為 +炻 +烈 +烉 +烊 +烋 +烏 +烒 +烔 +烘 +烙 +烜 +烝 +烤 +烯 +烱 +烴 +烷 +烹 +烺 +烽 +焃 +焄 +焉 +焊 +焌 +焓 +焗 +焙 +焚 +焜 +焞 +無 +焦 +焯 +焰 +焱 +焴 +然 +焻 +焼 +焿 +煇 +煉 +煊 +煌 +煎 +煐 +煒 +煔 +煕 +煖 +煙 +煚 +煜 +煞 +煠 +煤 +煥 +煦 +照 +煨 +煩 +煬 +煮 +煲 +煳 +煵 +煶 +煸 +煽 +熄 +熅 +熇 +熈 +熊 +熏 +熒 +熔 +熖 +熗 +熘 +熙 +熜 +熟 +熠 +熤 +熥 +熨 +熬 +熯 +熱 +熲 +熳 +熵 +熹 +熺 +熼 +熾 +熿 +燁 +燃 +燄 +燈 +燉 +燊 +燎 +燏 +燐 +燒 +燔 +燕 +燘 +燙 +燚 +燜 +燝 +營 +燥 +燦 +燧 +燫 +燬 +燭 +燮 +燴 +燹 +燻 +燼 +燾 +燿 +爀 +爆 +爌 +爍 +爐 +爔 +爚 +爛 +爝 +爨 +爪 +爬 +爭 +爯 +爰 +爲 +爵 +父 +爸 +爹 +爺 +爻 +爽 +爾 +爿 +牁 +牂 +牆 +片 +版 +牌 +牒 +牕 +牖 +牘 +牙 +牛 +牝 +牟 +牠 +牡 +牢 +牧 +物 +牯 +牲 +特 +牻 +牼 +牽 +犀 +犁 +犂 +犇 +犍 +犎 +犖 +犛 +犢 +犧 +犨 +犬 +犯 +犰 +犴 +犽 +狀 +狂 +狄 +狍 +狎 +狐 +狒 +狓 +狗 +狙 +狛 +狟 +狠 +狡 +狦 +狨 +狩 +狳 +狶 +狷 +狸 +狹 +狻 +狼 +猁 +猄 +猇 +猊 +猗 +猙 +猛 +猜 +猝 +猞 +猢 +猥 +猨 +猩 +猳 +猴 +猶 +猷 +猺 +猻 +猾 +猿 +獁 +獃 +獄 +獅 +獇 +獎 +獏 +獐 +獒 +獠 +獢 +獣 +獨 +獬 +獮 +獯 +獰 +獲 +獴 +獵 +獷 +獸 +獺 +獻 +獼 +獾 +玀 +玄 +玆 +率 +玉 +王 +玎 +玏 +玓 +玕 +玖 +玗 +玘 +玙 +玟 +玠 +玡 +玢 +玥 +玧 +玨 +玩 +玫 +玭 +玲 +玳 +玶 +玷 +玹 +玻 +玾 +珀 +珂 +珅 +珈 +珉 +珊 +珌 +珍 +珎 +珏 +珖 +珙 +珝 +珞 +珠 +珡 +珣 +珤 +珥 +珦 +珧 +珩 +珪 +班 +珮 +珵 +珹 +珺 +珽 +現 +琁 +球 +琄 +琅 +理 +琇 +琉 +琊 +琍 +琎 +琚 +琛 +琡 +琢 +琤 +琥 +琦 +琨 +琪 +琬 +琮 +琯 +琰 +琱 +琳 +琴 +琵 +琶 +琹 +琺 +琿 +瑀 +瑁 +瑂 +瑄 +瑅 +瑆 +瑈 +瑊 +瑋 +瑑 +瑒 +瑕 +瑗 +瑙 +瑚 +瑛 +瑜 +瑝 +瑞 +瑟 +瑠 +瑢 +瑣 +瑤 +瑥 +瑧 +瑨 +瑩 +瑪 +瑭 +瑯 +瑰 +瑱 +瑳 +瑴 +瑺 +瑾 +璀 +璁 +璃 +璄 +璆 +璇 +璈 +璉 +璋 +璌 +璐 +璕 +璘 +璙 +璚 +璜 +璞 +璟 +璠 +璡 +璣 +璥 +璦 +璧 +璨 +璩 +璪 +璫 +璬 +璮 +環 +璱 +璵 +璸 +璹 +璽 +璿 +瓈 +瓊 +瓌 +瓏 +瓑 +瓔 +瓖 +瓘 +瓚 +瓛 +瓜 +瓞 +瓠 +瓢 +瓣 +瓤 +瓦 +瓮 +瓴 +瓶 +瓷 +瓿 +甂 +甄 +甌 +甍 +甑 +甕 +甘 +甙 +甚 +甜 +生 +甡 +產 +産 +甥 +甦 +用 +甩 +甪 +甫 +甬 +甯 +田 +由 +甲 +申 +男 +甸 +甹 +町 +甾 +畀 +畇 +畈 +畊 +畋 +界 +畎 +畏 +畐 +畑 +畔 +留 +畜 +畝 +畠 +畢 +略 +畦 +畧 +番 +畫 +畬 +畯 +異 +畲 +畳 +畵 +當 +畷 +畸 +畹 +畿 +疃 +疆 +疇 +疊 
+疋 +疌 +疍 +疏 +疑 +疒 +疕 +疙 +疚 +疝 +疣 +疤 +疥 +疫 +疲 +疳 +疵 +疸 +疹 +疼 +疽 +疾 +痂 +病 +症 +痊 +痍 +痔 +痕 +痘 +痙 +痛 +痞 +痟 +痠 +痢 +痣 +痤 +痧 +痩 +痰 +痱 +痲 +痴 +痹 +痺 +痿 +瘀 +瘁 +瘊 +瘋 +瘍 +瘓 +瘙 +瘜 +瘞 +瘟 +瘠 +瘡 +瘢 +瘤 +瘦 +瘧 +瘩 +瘰 +瘴 +瘺 +癀 +療 +癆 +癇 +癌 +癒 +癖 +癘 +癜 +癟 +癡 +癢 +癤 +癥 +癩 +癬 +癭 +癮 +癯 +癰 +癱 +癲 +癸 +発 +登 +發 +白 +百 +皂 +的 +皆 +皇 +皈 +皋 +皎 +皐 +皓 +皖 +皙 +皚 +皛 +皝 +皞 +皮 +皰 +皴 +皷 +皸 +皺 +皿 +盂 +盃 +盅 +盆 +盈 +益 +盋 +盌 +盎 +盒 +盔 +盛 +盜 +盞 +盟 +盡 +監 +盤 +盥 +盦 +盧 +盨 +盩 +盪 +盫 +目 +盯 +盱 +盲 +直 +盷 +相 +盹 +盺 +盼 +盾 +眀 +省 +眉 +看 +県 +眙 +眛 +眜 +眞 +真 +眠 +眥 +眨 +眩 +眭 +眯 +眵 +眶 +眷 +眸 +眺 +眼 +眾 +着 +睇 +睛 +睜 +睞 +睡 +睢 +督 +睥 +睦 +睨 +睪 +睫 +睭 +睹 +睺 +睽 +睾 +睿 +瞄 +瞅 +瞋 +瞌 +瞎 +瞑 +瞓 +瞞 +瞢 +瞥 +瞧 +瞪 +瞫 +瞬 +瞭 +瞰 +瞳 +瞻 +瞼 +瞽 +瞿 +矇 +矍 +矗 +矚 +矛 +矜 +矞 +矢 +矣 +知 +矧 +矩 +短 +矮 +矯 +石 +矸 +矽 +砂 +砋 +砌 +砍 +砒 +研 +砝 +砢 +砥 +砦 +砧 +砩 +砫 +砭 +砮 +砯 +砰 +砲 +砳 +破 +砵 +砷 +砸 +砼 +硂 +硃 +硅 +硇 +硏 +硐 +硒 +硓 +硚 +硜 +硝 +硤 +硨 +硫 +硬 +硭 +硯 +硼 +碁 +碇 +碉 +碌 +碎 +碑 +碓 +碕 +碗 +碘 +碚 +碟 +碡 +碣 +碧 +碩 +碪 +碭 +碰 +碲 +碳 +碴 +碶 +碸 +確 +碻 +碼 +碽 +碾 +磁 +磅 +磊 +磋 +磐 +磔 +磕 +磘 +磙 +磚 +磜 +磡 +磨 +磪 +磬 +磯 +磱 +磲 +磵 +磷 +磺 +磻 +磾 +礁 +礄 +礎 +礐 +礑 +礒 +礙 +礠 +礦 +礪 +礫 +礬 +礮 +礱 +礴 +示 +礻 +礽 +社 +祀 +祁 +祂 +祆 +祇 +祈 +祉 +祋 +祏 +祐 +祓 +祕 +祖 +祗 +祙 +祚 +祛 +祜 +祝 +神 +祟 +祠 +祥 +祧 +票 +祭 +祹 +祺 +祼 +祿 +禁 +禃 +禇 +禍 +禎 +福 +禑 +禓 +禔 +禕 +禘 +禛 +禟 +禠 +禤 +禦 +禧 +禨 +禩 +禪 +禮 +禰 +禱 +禵 +禹 +禺 +禼 +禽 +禾 +禿 +秀 +私 +秈 +秉 +秋 +科 +秒 +秕 +秘 +租 +秠 +秣 +秤 +秦 +秧 +秩 +秭 +秳 +秸 +移 +稀 +稅 +稈 +稉 +程 +稍 +稑 +稔 +稗 +稘 +稙 +稚 +稜 +稞 +稟 +稠 +種 +稱 +稲 +稷 +稹 +稺 +稻 +稼 +稽 +稾 +稿 +穀 +穂 +穆 +穈 +穉 +穌 +積 +穎 +穗 +穟 +穠 +穡 +穢 +穣 +穩 +穫 +穰 +穴 +穵 +究 +穹 +空 +穿 +突 +窄 +窅 +窈 +窋 +窒 +窕 +窖 +窗 +窘 +窟 +窠 +窣 +窨 +窩 +窪 +窮 +窯 +窰 +窶 +窺 +窿 +竄 +竅 +竇 +竈 +竊 +立 +竑 +站 +竜 +竟 +章 +竣 +童 +竦 +竩 +竭 +端 +競 +竹 +竺 +竻 +竿 +笄 +笆 +笈 +笏 +笑 +笘 +笙 +笛 +笞 +笠 +笥 +符 +笨 +笩 +笪 +第 +笭 +笮 +笯 +笱 +笳 +笹 +筅 +筆 +等 +筊 +筋 +筌 +筍 +筏 +筐 +筒 +答 +策 +筘 +筠 +筥 +筦 +筧 +筬 +筭 +筱 +筲 +筳 +筵 +筶 +筷 +筻 +箆 +箇 +箋 +箍 +箏 +箐 +箑 +箒 +箔 +箕 +算 +箜 +管 +箬 +箭 +箱 +箴 +箸 +節 +篁 +範 +篆 +篇 +築 +篊 +篋 +篌 +篔 +篙 +篝 +篠 +篡 +篤 +篥 +篦 +篩 +篪 +篭 +篯 +篳 +篷 +簀 +簃 +簇 +簉 +簋 +簍 +簑 +簕 +簗 +簞 +簠 +簡 +簧 +簪 +簫 +簷 +簸 +簹 +簺 +簽 +簾 +簿 +籀 +籃 +籌 +籍 +籐 +籙 +籛 +籜 +籝 +籟 +籠 +籣 +籤 +籥 +籪 +籬 +籮 +籲 +米 +籽 +籾 +粄 +粉 +粍 +粑 +粒 +粕 +粗 +粘 +粟 +粢 +粥 +粦 +粧 +粩 +粱 +粲 +粳 +粵 +粹 +粼 +粽 +精 +粿 +糀 +糅 +糊 +糌 +糍 +糎 +糕 +糖 +糙 +糜 +糝 +糞 +糟 +糠 +糢 +糧 +糬 +糯 +糰 +糴 +糶 +糸 +糹 +糺 +系 +糾 +紀 +紂 +約 +紅 +紆 +紇 +紈 +紉 +紊 +紋 +納 +紐 +紑 +紓 +純 +紕 +紗 +紘 +紙 +級 +紛 +紜 +紝 +紞 +素 +紡 +索 +紫 +紮 +累 +細 +紱 +紲 +紳 +紵 +紹 +紺 +紿 +終 +絃 +組 +絆 +経 +絎 +結 +絕 +絛 +絜 +絞 +絡 +絢 +給 +絨 +絪 +絮 +統 +絲 +絳 +絵 +絶 +絹 +絺 +綁 +綃 +綈 +綉 +綎 +綏 +經 +綖 +継 +続 +綜 +綝 +綞 +綠 +綢 +綣 +綦 +綧 +綫 +綬 +維 +綮 +綰 +綱 +網 +綳 +綴 +綸 +綺 +綻 +綽 +綾 +綿 +緁 +緃 +緄 +緈 +緊 +緋 +総 +緑 +緒 +緖 +緘 +線 +緜 +緝 +緞 +締 +緡 +緣 +緤 +編 +緩 +緬 +緯 +緱 +緲 +練 +緹 +緻 +縂 +縄 +縈 +縉 +縊 +縕 +縛 +縝 +縞 +縠 +縡 +縣 +縤 +縫 +縮 +縯 +縱 +縴 +縵 +縷 +縹 +縻 +總 +績 +繁 +繃 +繆 +繇 +繒 +織 +繕 +繖 +繙 +繚 +繞 +繡 +繩 +繪 +繫 +繭 +繰 +繳 +繹 +繻 +繼 +繽 +繾 +纁 +纂 +纈 +續 +纍 +纏 +纓 +纔 +纕 +纖 +纘 +纛 +纜 +缐 +缶 +缸 +缺 +缽 +罃 +罄 +罅 +罈 +罉 +罌 +罍 +罐 +罔 +罕 +罘 +罟 +罡 +罨 +罩 +罪 +置 +罰 +罱 +署 +罳 +罵 +罶 +罷 +罹 +罽 +羂 +羅 +羆 +羈 +羊 +羋 +羌 +美 +羔 +羕 +羗 +羙 +羚 +羞 +羡 +羣 +群 +羥 +羧 +羨 +義 +羯 +羰 +羱 +羲 +羸 +羹 +羽 +羿 +翀 +翁 +翂 +翃 +翅 +翊 +翌 +翎 +翏 +習 +翔 +翕 +翙 +翜 +翟 +翠 +翡 +翥 +翦 +翩 +翬 +翮 +翰 +翱 +翳 +翹 +翻 +翼 +耀 +老 +考 +耄 +者 +耆 +而 +耍 +耎 +耐 +耑 +耒 +耔 +耕 +耗 +耘 +耙 +耜 +耦 +耨 +耬 +耳 +耵 +耶 +耷 +耽 +耿 +聃 +聆 +聊 +聒 +聖 +聘 +聚 +聞 +聟 +聨 +聯 +聰 +聱 +聲 +聳 +聴 +聶 +職 +聽 +聾 +聿 +肄 +肅 +肆 +肇 +肉 +肋 +肌 +肏 +肖 +肘 +肚 +肛 +肜 +肝 +肟 +股 +肢 +肥 +肩 +肪 +肫 +肯 +肱 +育 +肸 +肹 +肺 +肼 +肽 +胂 +胃 +胄 +胅 +胇 +胊 +背 +胍 +胎 +胖 +胗 +胙 +胚 +胛 +胝 +胞 +胡 +胤 +胥 +胬 +胭 +胰 +胱 +胳 +胴 +胸 +胺 +胼 +能 +脂 +脅 +脆 +脇 +脈 +脊 +脒 +脖 +脘 +脛 +脣 +脩 +脫 +脬 +脭 +脯 +脲 +脳 +脷 +脹 +脾 +腆 +腈 +腊 +腋 +腌 +腎 +腐 +腑 +腓 +腔 +腕 +腥 +腦 +腧 +腩 +腫 +腮 +腰 +腱 +腳 +腴 +腸 +腹 +腺 +腿 +膀 +膂 +膈 +膊 +膏 +膚 +膛 +膜 +膝 +膠 +膣 +膥 +膦 +膨 +膩 +膮 +膳 +膺 +膽 +膾 +膿 +臀 +臂 +臃 +臆 +臉 +臊 +臍 +臏 +臘 +臚 +臞 +臟 +臠 +臣 +臧 +臨 +自 +臭 +臯 +至 +致 +臺 +臻 +臼 +臾 +舂 +舅 +與 +興 +舉 +舊 +舌 +舍 +舎 +舒 +舔 +舖 +舘 +舛 
+舜 +舞 +舟 +舢 +舥 +舨 +舩 +航 +舫 +般 +舲 +舵 +舶 +舷 +舸 +船 +舺 +艅 +艇 +艉 +艋 +艎 +艏 +艔 +艘 +艙 +艚 +艦 +艮 +良 +艱 +色 +艶 +艷 +艸 +艽 +艾 +艿 +芃 +芊 +芋 +芍 +芎 +芑 +芒 +芘 +芙 +芛 +芝 +芡 +芥 +芨 +芩 +芪 +芫 +芬 +芭 +芮 +芯 +花 +芳 +芴 +芷 +芸 +芹 +芻 +芽 +芾 +苄 +苅 +苑 +苒 +苓 +苔 +苕 +苗 +苛 +苜 +苝 +苞 +苟 +苡 +苣 +苤 +若 +苦 +苧 +苪 +苫 +苯 +英 +苳 +苴 +苷 +苺 +苻 +苼 +苾 +茀 +茁 +茂 +范 +茄 +茅 +茆 +茇 +茈 +茉 +茌 +茗 +茘 +茚 +茛 +茜 +茝 +茨 +茫 +茬 +茭 +茮 +茯 +茱 +茲 +茴 +茵 +茶 +茷 +茸 +茹 +茺 +茼 +荀 +荃 +荅 +荇 +草 +荊 +荎 +荏 +荒 +荔 +荖 +荘 +荳 +荷 +荸 +荻 +荼 +荽 +莆 +莉 +莊 +莎 +莒 +莓 +莕 +莖 +莘 +莙 +莛 +莜 +莞 +莠 +莢 +莧 +莨 +莩 +莪 +莫 +莽 +莿 +菀 +菁 +菅 +菇 +菈 +菉 +菊 +菌 +菍 +菏 +菑 +菓 +菔 +菖 +菘 +菜 +菝 +菟 +菠 +菡 +菥 +菩 +菪 +菫 +華 +菰 +菱 +菲 +菴 +菶 +菸 +菹 +菺 +菼 +菽 +菾 +萁 +萃 +萄 +萇 +萊 +萌 +萍 +萎 +萐 +萘 +萜 +萠 +萡 +萣 +萩 +萬 +萭 +萱 +萵 +萸 +萹 +萼 +落 +葃 +葆 +葉 +葊 +葎 +葑 +葒 +著 +葙 +葚 +葛 +葜 +葝 +葡 +董 +葦 +葩 +葫 +葬 +葭 +葯 +葰 +葳 +葵 +葶 +葷 +葺 +蒂 +蒄 +蒍 +蒎 +蒐 +蒓 +蒔 +蒗 +蒙 +蒜 +蒞 +蒟 +蒡 +蒢 +蒤 +蒧 +蒨 +蒭 +蒯 +蒲 +蒴 +蒸 +蒹 +蒺 +蒻 +蒼 +蒽 +蒾 +蒿 +蓀 +蓁 +蓂 +蓄 +蓆 +蓉 +蓋 +蓍 +蓑 +蓓 +蓖 +蓘 +蓚 +蓧 +蓨 +蓪 +蓬 +蓭 +蓮 +蓯 +蓳 +蓼 +蓽 +蓿 +蔆 +蔎 +蔑 +蔓 +蔔 +蔕 +蔗 +蔘 +蔚 +蔝 +蔞 +蔡 +蔣 +蔥 +蔦 +蔬 +蔭 +蔴 +蔵 +蔻 +蔽 +蕁 +蕃 +蕅 +蕈 +蕉 +蕊 +蕎 +蕑 +蕒 +蕖 +蕘 +蕙 +蕚 +蕟 +蕡 +蕢 +蕤 +蕨 +蕩 +蕪 +蕭 +蕷 +蕹 +蕺 +蕻 +蕾 +薀 +薄 +薆 +薇 +薈 +薊 +薌 +薏 +薐 +薑 +薔 +薗 +薘 +薙 +薛 +薜 +薞 +薟 +薡 +薦 +薨 +薩 +薪 +薫 +薬 +薯 +薰 +薲 +薷 +薸 +薹 +薺 +薾 +薿 +藁 +藉 +藍 +藎 +藏 +藐 +藔 +藕 +藜 +藝 +藟 +藤 +藥 +藦 +藨 +藩 +藪 +藶 +藸 +藹 +藺 +藻 +藿 +蘂 +蘄 +蘅 +蘆 +蘇 +蘊 +蘋 +蘐 +蘑 +蘓 +蘗 +蘘 +蘚 +蘞 +蘢 +蘧 +蘩 +蘭 +蘵 +蘶 +蘸 +蘼 +蘿 +虉 +虎 +虐 +虓 +虔 +處 +虖 +虛 +虜 +虞 +號 +虢 +虧 +虨 +虯 +虱 +虵 +虹 +虺 +虻 +蚆 +蚊 +蚋 +蚌 +蚍 +蚓 +蚖 +蚜 +蚝 +蚡 +蚢 +蚣 +蚤 +蚧 +蚨 +蚩 +蚪 +蚯 +蚱 +蚴 +蚵 +蚶 +蚺 +蚼 +蛀 +蛄 +蛇 +蛉 +蛋 +蛍 +蛐 +蛑 +蛔 +蛙 +蛛 +蛞 +蛟 +蛤 +蛭 +蛯 +蛸 +蛹 +蛺 +蛻 +蛾 +蜀 +蜂 +蜃 +蜆 +蜇 +蜈 +蜉 +蜊 +蜍 +蜑 +蜒 +蜓 +蜘 +蜚 +蜛 +蜜 +蜞 +蜢 +蜣 +蜥 +蜨 +蜮 +蜯 +蜱 +蜴 +蜷 +蜻 +蜾 +蜿 +蝀 +蝌 +蝍 +蝎 +蝓 +蝕 +蝗 +蝘 +蝙 +蝚 +蝟 +蝠 +蝣 +蝤 +蝦 +蝨 +蝮 +蝯 +蝰 +蝲 +蝴 +蝶 +蝸 +蝽 +螂 +螃 +螄 +螅 +螈 +螋 +融 +螐 +螔 +螞 +螟 +螠 +螢 +螣 +螥 +螫 +螭 +螯 +螳 +螶 +螺 +螻 +螽 +螾 +蟀 +蟄 +蟅 +蟆 +蟊 +蟋 +蟌 +蟎 +蟑 +蟒 +蟜 +蟠 +蟥 +蟪 +蟫 +蟬 +蟯 +蟲 +蟳 +蟴 +蟶 +蟹 +蟻 +蟾 +蠂 +蠃 +蠄 +蠅 +蠆 +蠊 +蠋 +蠍 +蠐 +蠑 +蠓 +蠔 +蠕 +蠖 +蠘 +蠙 +蠟 +蠡 +蠢 +蠣 +蠱 +蠲 +蠵 +蠶 +蠷 +蠹 +蠻 +血 +衂 +衆 +行 +衍 +衎 +術 +衕 +衖 +街 +衙 +衚 +衛 +衜 +衝 +衞 +衡 +衢 +衣 +表 +衩 +衫 +衰 +衲 +衷 +衽 +衾 +衿 +袁 +袂 +袈 +袋 +袍 +袓 +袖 +袛 +袞 +袤 +袪 +被 +袱 +袴 +袾 +裁 +裂 +裊 +裎 +裒 +裔 +裕 +裖 +裘 +裙 +補 +裝 +裟 +裡 +裨 +裬 +裱 +裳 +裴 +裵 +裸 +裹 +製 +裾 +裿 +褀 +褂 +複 +褌 +褍 +褎 +褐 +褒 +褓 +褔 +褘 +褙 +褚 +褞 +褥 +褧 +褪 +褫 +褭 +褲 +褶 +褸 +褻 +襄 +襌 +襖 +襞 +襟 +襠 +襤 +襦 +襪 +襯 +襲 +襴 +襶 +襻 +襾 +西 +要 +覃 +覆 +覇 +覈 +見 +覌 +規 +覓 +視 +覚 +覡 +覦 +覧 +親 +覬 +覲 +観 +覺 +覽 +覿 +觀 +角 +觔 +觙 +觚 +觜 +解 +觭 +觱 +觴 +觶 +觸 +觿 +言 +訁 +訂 +訃 +訇 +計 +訊 +訌 +討 +訏 +訐 +訒 +訓 +訔 +訕 +訖 +託 +記 +訛 +訝 +訟 +訣 +訥 +訪 +設 +許 +訴 +訶 +診 +註 +証 +訾 +詁 +詆 +詈 +詐 +詒 +詔 +評 +詛 +詞 +詠 +詡 +詢 +詣 +詥 +試 +詧 +詩 +詫 +詭 +詮 +詰 +話 +該 +詳 +詵 +詹 +詼 +誄 +誅 +誇 +誌 +認 +誒 +誓 +誕 +誘 +語 +誠 +誡 +誣 +誤 +誥 +誦 +誨 +說 +説 +読 +誰 +課 +誴 +誹 +誼 +誾 +調 +談 +請 +諍 +諏 +諒 +論 +諗 +諜 +諟 +諠 +諡 +諤 +諦 +諧 +諪 +諫 +諭 +諮 +諱 +諲 +諳 +諴 +諶 +諷 +諸 +諺 +諼 +諾 +謀 +謁 +謂 +謄 +謇 +謊 +謌 +謎 +謏 +謐 +謔 +謖 +謗 +謙 +謚 +講 +謜 +謝 +謠 +謢 +謤 +謨 +謩 +謫 +謬 +謳 +謹 +謾 +證 +譏 +譓 +譔 +識 +譙 +譚 +譜 +譞 +警 +譫 +譬 +譭 +譯 +議 +譲 +譳 +譴 +護 +譽 +譿 +讀 +讃 +變 +讌 +讎 +讓 +讖 +讙 +讚 +讜 +讞 +谷 +谿 +豁 +豆 +豇 +豈 +豉 +豊 +豌 +豎 +豐 +豔 +豕 +豚 +象 +豢 +豨 +豪 +豫 +豬 +豳 +豸 +豹 +豺 +豿 +貂 +貅 +貉 +貊 +貌 +貐 +貒 +貓 +貔 +貘 +貝 +貞 +負 +財 +貢 +貤 +貧 +貨 +販 +貪 +貫 +責 +貭 +貮 +貯 +貲 +貳 +貴 +貶 +買 +貸 +貺 +費 +貼 +貽 +貿 +賀 +賁 +賂 +賃 +賄 +資 +賈 +賊 +賑 +賒 +賓 +賔 +賕 +賚 +賜 +賞 +賠 +賡 +賢 +賣 +賤 +賦 +賨 +質 +賬 +賭 +賴 +賹 +賺 +賻 +購 +賽 +賾 +贄 +贅 +贇 +贈 +贊 +贌 +贍 +贏 +贓 +贔 +贖 +贛 +赤 +赦 +赧 +赫 +赬 +赭 +走 +赳 +赴 +起 +趁 +超 +越 +趐 +趕 +趖 +趙 +趟 +趣 +趨 +足 +趴 +趵 +趺 +趼 +趾 +跅 +跆 +跋 +跌 +跏 +跑 +跖 +跗 +跛 +距 +跟 +跡 +跣 +跤 +跨 +跩 +跪 +路 +跳 +踎 +踏 +踐 +踝 +踞 +踢 +踩 +踰 +踴 +踹 +踺 +蹂 +蹄 +蹇 +蹈 +蹉 +蹊 +蹋 +蹕 +蹙 +蹟 +蹠 +蹤 +蹦 +蹬 +蹭 +蹯 +蹲 +蹴 +蹶 +蹺 +蹻 +蹼 +躁 +躂 +躄 +躉 +躋 +躍 +躑 +躒 +躔 +躝 +躪 +身 +躬 +躰 +躲 +躺 +軀 +車 +軋 +軌 +軍 +軎 +軒 +軔 +軛 +軟 +転 +軫 +軲 +軸 +軹 +軺 +軻 +軼 +軽 +軾 +較 +輄 +輅 +載 +輋 +輒 +輓 +輔 +輕 +輛 +輝 +輞 +輟 +輥 +輦 +輩 +輪 +輬 +輭 +輯 
+輶 +輸 +輻 +輾 +輿 +轀 +轂 +轄 +轅 +轆 +轉 +轍 +轎 +轘 +轝 +轟 +轤 +辛 +辜 +辟 +辣 +辦 +辧 +辨 +辭 +辮 +辯 +辰 +辱 +農 +辵 +辺 +辻 +込 +迂 +迄 +迅 +迎 +近 +返 +迢 +迤 +迥 +迦 +迪 +迫 +迭 +迮 +述 +迴 +迵 +迷 +迸 +迺 +追 +退 +送 +逃 +逄 +逅 +逆 +逈 +逋 +逌 +逍 +逎 +透 +逐 +逑 +途 +逕 +逖 +逗 +這 +通 +逛 +逝 +逞 +速 +造 +逢 +連 +逤 +逨 +逮 +逯 +進 +逴 +逵 +逸 +逹 +逺 +逼 +逾 +遁 +遂 +遄 +遇 +遊 +運 +遍 +過 +遏 +遐 +遒 +道 +達 +違 +遘 +遙 +遛 +遜 +遞 +遠 +遢 +遣 +遨 +適 +遭 +遮 +遯 +遲 +遴 +遵 +遶 +遷 +選 +遹 +遺 +遼 +避 +邀 +邁 +邂 +邃 +還 +邇 +邈 +邉 +邊 +邋 +邏 +邑 +邕 +邗 +邙 +邛 +邠 +邡 +邢 +那 +邦 +邨 +邪 +邯 +邰 +邱 +邲 +邳 +邴 +邵 +邸 +邽 +邾 +郁 +郃 +郄 +郅 +郇 +郊 +郋 +郎 +郗 +郛 +郜 +郝 +郞 +郟 +郡 +郢 +郤 +部 +郪 +郫 +郭 +郯 +郳 +郴 +郵 +郷 +都 +郾 +郿 +鄂 +鄃 +鄄 +鄆 +鄉 +鄋 +鄑 +鄒 +鄔 +鄖 +鄗 +鄘 +鄙 +鄚 +鄜 +鄞 +鄠 +鄢 +鄣 +鄤 +鄧 +鄩 +鄫 +鄭 +鄯 +鄰 +鄱 +鄲 +鄳 +鄴 +鄺 +酃 +酆 +酈 +酉 +酊 +酋 +酌 +配 +酎 +酏 +酐 +酒 +酔 +酗 +酚 +酞 +酡 +酢 +酣 +酥 +酩 +酪 +酬 +酮 +酯 +酰 +酴 +酵 +酶 +酷 +酸 +酺 +酼 +醁 +醂 +醃 +醅 +醇 +醉 +醋 +醌 +醍 +醐 +醒 +醚 +醛 +醜 +醞 +醢 +醣 +醪 +醫 +醬 +醮 +醯 +醴 +醺 +醾 +醿 +釀 +釁 +釆 +采 +釉 +釋 +里 +重 +野 +量 +釐 +金 +釒 +釓 +釔 +釕 +釗 +釘 +釙 +釚 +釜 +針 +釣 +釤 +釦 +釧 +釩 +釪 +釭 +釴 +釵 +釷 +釹 +釺 +鈀 +鈁 +鈄 +鈇 +鈈 +鈉 +鈊 +鈍 +鈏 +鈐 +鈑 +鈔 +鈕 +鈖 +鈞 +鈢 +鈣 +鈥 +鈦 +鈫 +鈮 +鈰 +鈳 +鈴 +鈷 +鈸 +鈹 +鈺 +鈾 +鈿 +鉀 +鉄 +鉅 +鉆 +鉈 +鉉 +鉋 +鉌 +鉍 +鉏 +鉑 +鉓 +鉗 +鉚 +鉛 +鉞 +鉟 +鉤 +鉦 +鉬 +鉭 +鉲 +鉶 +鉷 +鉸 +鉻 +鉾 +鉿 +銀 +銂 +銃 +銅 +銋 +銍 +銑 +銓 +銕 +銖 +銘 +銚 +銜 +銠 +銣 +銥 +銦 +銨 +銩 +銪 +銫 +銬 +銭 +銱 +銲 +銳 +銶 +銷 +銹 +銻 +銼 +銾 +鋁 +鋅 +鋆 +鋇 +鋌 +鋏 +鋐 +鋒 +鋕 +鋗 +鋙 +鋡 +鋤 +鋥 +鋦 +鋨 +鋪 +鋮 +鋯 +鋰 +鋱 +鋳 +鋶 +鋸 +鋹 +鋼 +錀 +錄 +錏 +錐 +錒 +錕 +錘 +錚 +錞 +錟 +錠 +錡 +錢 +錦 +錨 +錫 +錬 +錮 +錯 +錳 +錶 +錸 +錻 +鍀 +鍇 +鍈 +鍉 +鍊 +鍋 +鍍 +鍏 +鍔 +鍘 +鍛 +鍝 +鍟 +鍠 +鍥 +鍩 +鍬 +鍱 +鍳 +鍵 +鍶 +鍷 +鍺 +鍼 +鍾 +鎂 +鎅 +鎊 +鎌 +鎏 +鎓 +鎔 +鎖 +鎗 +鎘 +鎚 +鎛 +鎢 +鎣 +鎦 +鎧 +鎪 +鎬 +鎭 +鎮 +鎰 +鎳 +鎵 +鎻 +鏃 +鏇 +鏈 +鏊 +鏌 +鏐 +鏑 +鏓 +鏖 +鏗 +鏘 +鏜 +鏝 +鏞 +鏟 +鏡 +鏢 +鏤 +鏦 +鏳 +鏴 +鏵 +鏷 +鏻 +鏽 +鐃 +鐇 +鐈 +鐓 +鐔 +鐘 +鐙 +鐠 +鐡 +鐤 +鐦 +鐧 +鐫 +鐬 +鐭 +鐮 +鐲 +鐳 +鐵 +鐸 +鐺 +鐽 +鐿 +鑀 +鑁 +鑂 +鑄 +鑅 +鑊 +鑌 +鑑 +鑒 +鑛 +鑠 +鑣 +鑨 +鑪 +鑫 +鑭 +鑰 +鑲 +鑴 +鑷 +鑼 +鑽 +鑾 +鑿 +長 +門 +閂 +閃 +閆 +閉 +開 +閎 +閏 +閑 +閒 +間 +閔 +閘 +閜 +閞 +閟 +関 +閣 +閥 +閦 +閨 +閩 +閬 +閭 +閰 +閱 +閶 +閹 +閻 +閼 +閾 +閿 +闆 +闇 +闈 +闊 +闋 +闌 +闍 +闐 +闓 +闔 +闕 +闖 +闘 +關 +闞 +闡 +闢 +闥 +阜 +阝 +阡 +阪 +阭 +阮 +阯 +阱 +防 +阻 +阿 +陀 +陁 +陂 +附 +陋 +陌 +降 +限 +陔 +陘 +陛 +陜 +陝 +陞 +陟 +陡 +院 +陣 +除 +陪 +陬 +陰 +陲 +陳 +陵 +陶 +陷 +陸 +険 +陽 +隄 +隅 +隆 +隈 +隊 +隋 +隍 +階 +隔 +隕 +隗 +隘 +隙 +際 +障 +隣 +隧 +隨 +險 +隰 +隱 +隲 +隳 +隴 +隷 +隸 +隹 +隻 +隼 +雀 +雁 +雄 +雅 +集 +雇 +雉 +雋 +雌 +雍 +雎 +雑 +雒 +雕 +雖 +雙 +雛 +雜 +雝 +雞 +離 +難 +雨 +雩 +雪 +雫 +雯 +雱 +雲 +零 +雷 +雹 +電 +需 +霄 +霅 +霆 +震 +霈 +霉 +霊 +霍 +霎 +霏 +霑 +霓 +霖 +霙 +霜 +霞 +霤 +霧 +霨 +霰 +露 +霶 +霸 +霹 +霽 +霾 +靁 +靂 +靄 +靈 +靉 +靑 +青 +靖 +靚 +靛 +靜 +非 +靠 +靡 +面 +革 +靫 +靬 +靭 +靳 +靴 +靶 +靺 +靼 +鞅 +鞆 +鞋 +鞍 +鞏 +鞘 +鞞 +鞠 +鞣 +鞥 +鞦 +鞨 +鞭 +鞮 +鞴 +韁 +韃 +韆 +韋 +韌 +韑 +韓 +韙 +韜 +韞 +韠 +韡 +韭 +韮 +音 +韶 +韺 +韻 +韾 +響 +頁 +頂 +頃 +項 +順 +須 +頊 +頌 +頍 +頎 +頏 +預 +頑 +頒 +頓 +頔 +頗 +領 +頜 +頠 +頡 +頤 +頦 +頫 +頭 +頰 +頴 +頵 +頷 +頸 +頹 +頻 +頼 +顆 +題 +額 +顎 +顏 +顒 +顓 +顔 +顕 +顗 +願 +顙 +顛 +類 +顥 +顧 +顫 +顯 +顰 +顱 +顳 +顴 +風 +颮 +颯 +颱 +颶 +颺 +颼 +飄 +飆 +飈 +飛 +食 +飠 +飡 +飢 +飥 +飩 +飪 +飫 +飬 +飭 +飮 +飯 +飲 +飴 +飼 +飽 +飾 +餃 +餄 +餅 +餉 +養 +餌 +餎 +餐 +餒 +餓 +餗 +餘 +餚 +餛 +餞 +餠 +餡 +館 +餮 +餵 +餺 +餾 +餿 +饃 +饅 +饋 +饌 +饑 +饒 +饕 +饗 +饞 +饟 +饢 +首 +馗 +馘 +香 +馛 +馥 +馦 +馨 +馬 +馭 +馮 +馯 +馱 +馳 +馴 +馼 +駁 +駄 +駅 +駆 +駐 +駑 +駒 +駔 +駕 +駘 +駙 +駛 +駝 +駟 +駢 +駭 +駰 +駱 +駿 +騁 +騂 +騄 +騅 +騋 +騎 +騏 +験 +騖 +騙 +騤 +騨 +騫 +騭 +騮 +騰 +騶 +騷 +騾 +驁 +驃 +驄 +驅 +驊 +驌 +驍 +驎 +驒 +驕 +驗 +驚 +驛 +驟 +驢 +驤 +驥 +驩 +驪 +骨 +骯 +骰 +骶 +骷 +骸 +骼 +髀 +髂 +髎 +髏 +髑 +髒 +髓 +體 +高 +髙 +髡 +髦 +髪 +髭 +髮 +髯 +髲 +髷 +髹 +髻 +鬃 +鬄 +鬅 +鬆 +鬍 +鬚 +鬟 +鬢 +鬣 +鬥 +鬧 +鬨 +鬩 +鬪 +鬬 +鬮 +鬯 +鬱 +鬲 +鬹 +鬻 +鬼 +魁 +魂 +魃 +魄 +魅 +魈 +魋 +魍 +魎 +魏 +魔 +魕 +魘 +魚 +魛 +魞 +魟 +魣 +魨 +魩 +魮 +魯 +魴 +魷 +鮀 +鮁 +鮃 +鮄 +鮊 +鮋 +鮍 +鮐 +鮑 +鮒 +鮓 +鮗 +鮜 +鮟 +鮠 +鮡 +鮣 +鮨 +鮪 +鮫 +鮭 +鮮 +鮰 +鮸 +鮹 +鮻 +鯀 +鯁 +鯃 +鯇 +鯉 +鯊 +鯏 +鯒 +鯓 +鯔 +鯕 +鯖 +鯗 +鯙 +鯛 +鯡 +鯢 +鯤 +鯧 +鯨 +鯪 +鯭 +鯮 +鯰 +鯶 +鯷 +鯻 +鯽 +鯿 +鰂 +鰃 +鰆 +鰈 +鰉 +鰍 +鰏 +鰒 +鰓 +鰕 +鰗 +鰛 +鰜 +鰟 +鰣 +鰤 +鰧 +鰨 +鰩 +鰭 +鰮 +鰱 +鰲 +鰳 +鰶 +鰷 +鰹 +鰺 +鰻 +鰼 +鰾 +鱀 +鱂 +鱅 +鱇 +鱈 +鱉 +鱊 +鱒 +鱓 +鱔 +鱖 +鱗 +鱘 +鱚 +鱝 +鱟 +鱠 +鱣 +鱥 
+鱧 +鱨 +鱬 +鱮 +鱰 +鱲 +鱵 +鱷 +鱸 +鱺 +鱻 +鳥 +鳧 +鳩 +鳯 +鳰 +鳳 +鳴 +鳶 +鳽 +鴆 +鴇 +鴉 +鴒 +鴓 +鴕 +鴗 +鴛 +鴝 +鴞 +鴟 +鴡 +鴣 +鴦 +鴨 +鴫 +鴯 +鴰 +鴴 +鴻 +鴿 +鵂 +鵄 +鵎 +鵐 +鵑 +鵒 +鵓 +鵙 +鵜 +鵝 +鵞 +鵟 +鵠 +鵡 +鵪 +鵬 +鵯 +鵰 +鵲 +鵵 +鵼 +鵾 +鶆 +鶇 +鶉 +鶏 +鶒 +鶓 +鶘 +鶚 +鶡 +鶥 +鶩 +鶬 +鶯 +鶲 +鶴 +鶹 +鶺 +鶻 +鶼 +鶿 +鷂 +鷄 +鷉 +鷎 +鷓 +鷗 +鷙 +鷚 +鷟 +鷥 +鷦 +鷫 +鷯 +鷲 +鷳 +鷸 +鷹 +鷺 +鸊 +鸌 +鸐 +鸑 +鸕 +鸘 +鸚 +鸛 +鸜 +鸝 +鸞 +鹮 +鹵 +鹹 +鹼 +鹽 +鹿 +麂 +麅 +麇 +麈 +麊 +麋 +麐 +麒 +麓 +麗 +麝 +麞 +麟 +麥 +麩 +麪 +麯 +麴 +麵 +麹 +麺 +麻 +麼 +麽 +麾 +麿 +黁 +黃 +黇 +黌 +黍 +黎 +黏 +黐 +黑 +黒 +黔 +默 +黙 +黛 +黜 +黝 +點 +黟 +黥 +黧 +黨 +黯 +黴 +黶 +黻 +黼 +黽 +黿 +鼂 +鼇 +鼈 +鼉 +鼎 +鼐 +鼒 +鼓 +鼕 +鼙 +鼠 +鼢 +鼩 +鼬 +鼯 +鼱 +鼴 +鼷 +鼻 +鼽 +鼾 +齊 +齋 +齒 +齕 +齡 +齣 +齦 +齧 +齲 +齶 +龍 +龎 +龐 +龑 +龔 +龕 +龜 +龝 +龠 +龢 +郎 +凉 +﹑ +﹗ +﹝ +﹞ +﹢ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +A +B +C +D +E +F +G +H +I +K +L +M +N +O +P +R +S +T +U +V +W +Y +Z +[ +] +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +r +s +t +u +z +{ +| +} +~ +¥ +𣇉 + diff --git a/modules/pytorchocr/utils/dict/cyrillic_dict.txt b/modules/pytorchocr/utils/dict/cyrillic_dict.txt new file mode 100644 index 0000000..2b6f664 --- /dev/null +++ b/modules/pytorchocr/utils/dict/cyrillic_dict.txt @@ -0,0 +1,163 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +Ё +Є +І +Ј +Љ +Ў +А +Б +В +Г +Д +Е +Ж +З +И +Й +К +Л +М +Н +О +П +Р +С +Т +У +Ф +Х +Ц +Ч +Ш +Щ +Ъ +Ы +Ь +Э +Ю +Я +а +б +в +г +д +е +ж +з +и +й +к +л +м +н +о +п +р +с +т +у +ф +х +ц +ч +ш +щ +ъ +ы +ь +э +ю +я +ё +ђ +є +і +ј +љ +њ +ћ +ў +џ +Ґ +ґ diff --git a/modules/pytorchocr/utils/dict/devanagari_dict.txt b/modules/pytorchocr/utils/dict/devanagari_dict.txt new file mode 100644 index 0000000..f559230 --- /dev/null +++ b/modules/pytorchocr/utils/dict/devanagari_dict.txt @@ -0,0 +1,167 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +ँ +ं +ः +अ +आ +इ +ई +उ +ऊ +ऋ +ए +ऐ +ऑ +ओ +औ +क +ख +ग +घ +ङ +च +छ +ज +झ +ञ +ट +ठ +ड +ढ +ण +त +थ +द +ध +न +ऩ +प +फ +ब +भ +म +य +र +ऱ +ल +ळ +व +श +ष +स +ह +़ +ा +ि +ी +ु +ू +ृ +ॅ +े +ै +ॉ +ो +ौ +् +॒ +क़ +ख़ +ग़ +ज़ +ड़ +ढ़ +फ़ +ॠ +। +० +१ +२ +३ +४ +५ +६ +७ +८ +९ +॰ diff --git a/modules/pytorchocr/utils/dict/en_dict.txt b/modules/pytorchocr/utils/dict/en_dict.txt new file mode 100644 index 0000000..6fbd99f --- /dev/null +++ b/modules/pytorchocr/utils/dict/en_dict.txt @@ -0,0 +1,63 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z + diff --git a/modules/pytorchocr/utils/dict/es_dict.txt b/modules/pytorchocr/utils/dict/es_dict.txt new file mode 100644 index 0000000..f195f1e --- /dev/null +++ b/modules/pytorchocr/utils/dict/es_dict.txt @@ -0,0 +1,110 @@ +x +i +_ +m +g +/ +1 +0 +I +L +S +V +R +C +2 +v +a +l +3 +6 +4 +5 +. +j +p + +Q +u +e +r +o +8 +7 +n +c +9 +t +b +é +q +d +ó +y +F +s +, +O +í +T +f +" +U +M +h +: +P +H +A +E +D +z +N +á +ñ +ú +% +; +è ++ +Y +- +B +G +( +) +¿ +? +w +¡ +! 
+X +É +K +k +Á +ü +Ú +« +» +J +' +ö +W +Z +º +Ö +­ +[ +] +Ç +ç +à +ä +û +ò +Í +ê +ô +ø +ª diff --git a/modules/pytorchocr/utils/dict/fa_dict.txt b/modules/pytorchocr/utils/dict/fa_dict.txt new file mode 100644 index 0000000..2328fbd --- /dev/null +++ b/modules/pytorchocr/utils/dict/fa_dict.txt @@ -0,0 +1,136 @@ +f +a +_ +i +m +g +/ +1 +3 +I +L +S +V +R +C +2 +0 +v +l +6 +8 +5 +. +j +p +و +د +ر +ك +ن +ش +ه +ا +4 +9 +ی +ج +ِ +7 +غ +ل +س +ز +ّ +ت +ک +گ +ي +م +ب +ف +چ +خ +ق +ژ +آ +ص +پ +َ +ع +ئ +ح +ٔ +ض +ُ +ذ +أ +ى +ط +ظ +ث +ة +ً +ء +ؤ +ْ +ۀ +إ +ٍ +ٌ +ٰ +ٓ +ٱ +s +c +e +n +w +N +E +W +Y +D +O +H +A +d +z +r +T +G +o +t +x +h +b +B +M +Z +u +P +F +y +q +U +K +k +J +Q +' +X +# +? +% +$ +, +: +& +! +- +( +É +@ +é ++ + diff --git a/modules/pytorchocr/utils/dict/french_dict.txt b/modules/pytorchocr/utils/dict/french_dict.txt new file mode 100644 index 0000000..e8f657d --- /dev/null +++ b/modules/pytorchocr/utils/dict/french_dict.txt @@ -0,0 +1,136 @@ +f +e +n +c +h +_ +i +m +g +/ +r +v +a +l +t +w +o +d +6 +1 +. +p +B +u +2 +à +3 +R +y +4 +U +E +A +5 +P +O +S +T +D +7 +Z +8 +I +N +L +G +M +H +0 +J +K +- +9 +F +C +V +é +X +' +s +Q +: +è +x +b +Y +Œ +É +z +W +Ç +È +k +Ô +ô +€ +À +Ê +q +ù +° +ê +î +* + +j +" +, +â +% +û +ç +ü +? +! +; +ö +( +) +ï +º +ó +ø +å ++ +™ +á +Ë +< +² +Á +Î +& +@ +œ +ε +Ü +ë +[ +] +í +ò +Ö +ä +ß +« +» +ú +ñ +æ +µ +³ +Å +$ +# + diff --git a/modules/pytorchocr/utils/dict/german_dict.txt b/modules/pytorchocr/utils/dict/german_dict.txt new file mode 100644 index 0000000..5e121af --- /dev/null +++ b/modules/pytorchocr/utils/dict/german_dict.txt @@ -0,0 +1,143 @@ + +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +] +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +£ +§ +­ +° +´ +µ +· +º +¿ +Á +Ä +Å +É +Ï +Ô +Ö +Ü +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +í +ï +ñ +ò +ó +ô +ö +ø +ù +ú +û +ü +ō +Š +Ÿ +ʒ +β +δ +з +Ṡ +‘ +€ +© +ª +« +¬ diff --git a/modules/pytorchocr/utils/dict/hi_dict.txt b/modules/pytorchocr/utils/dict/hi_dict.txt new file mode 100644 index 0000000..8dfedb5 --- /dev/null +++ b/modules/pytorchocr/utils/dict/hi_dict.txt @@ -0,0 +1,162 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +ँ +ं +ः +अ +आ +इ +ई +उ +ऊ +ऋ +ए +ऐ +ऑ +ओ +औ +क +ख +ग +घ +ङ +च +छ +ज +झ +ञ +ट +ठ +ड +ढ +ण +त +थ +द +ध +न +प +फ +ब +भ +म +य +र +ल +ळ +व +श +ष +स +ह +़ +ा +ि +ी +ु +ू +ृ +ॅ +े +ै +ॉ +ो +ौ +् +क़ +ख़ +ग़ +ज़ +ड़ +ढ़ +फ़ +० +१ +२ +३ +४ +५ +६ +७ +८ +९ +॰ diff --git a/modules/pytorchocr/utils/dict/it_dict.txt b/modules/pytorchocr/utils/dict/it_dict.txt new file mode 100644 index 0000000..e692c6d --- /dev/null +++ b/modules/pytorchocr/utils/dict/it_dict.txt @@ -0,0 +1,118 @@ +i +t +_ +m +g +/ +5 +I +L +S +V +R +C +2 +0 +1 +v +a +l +7 +8 +9 +6 +. +j +p + +e +r +o +d +s +n +3 +4 +P +u +c +A +- +, +" +z +h +f +b +q +ì +' +à +O +è +G +ù +é +ò +; +F +E +B +N +H +k +: +U +T +X +D +K +? +[ +M +­ +x +y +( +) +W +ö +º +w +] +Q +J ++ +ü +! +È +á +% += +» +ñ +Ö +Y +ä +í +Z +« +@ +ó +ø +ï +ú +ê +ç +Á +É +Å +ß +{ +} +& +` +û +î +# +$ diff --git a/modules/pytorchocr/utils/dict/japan_dict.txt b/modules/pytorchocr/utils/dict/japan_dict.txt new file mode 100644 index 0000000..339d4b8 --- /dev/null +++ b/modules/pytorchocr/utils/dict/japan_dict.txt @@ -0,0 +1,4399 @@ +! 
+" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +] +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +© +° +² +´ +½ +Á +Ä +Å +Ç +È +É +Í +Ó +Ö +× +Ü +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +í +ð +ñ +ò +ó +ô +õ +ö +ø +ú +û +ü +ý +ā +ă +ą +ć +Č +č +đ +ē +ė +ę +ğ +ī +ı +Ł +ł +ń +ň +ō +ř +Ş +ş +Š +š +ţ +ū +ż +Ž +ž +Ș +ș +ț +Δ +α +λ +μ +φ +Г +О +а +в +л +о +р +с +т +я +ồ +​ +— +― +’ +“ +” +… +℃ +→ +∇ +− +■ +☆ +  +、 +。 +々 +〆 +〈 +〉 +「 +」 +『 +』 +〔 +〕 +〜 +ぁ +あ +ぃ +い +う +ぇ +え +ぉ +お +か +が +き +ぎ +く +ぐ +け +げ +こ +ご +さ +ざ +し +じ +す +ず +せ +ぜ +そ +ぞ +た +だ +ち +ぢ +っ +つ +づ +て +で +と +ど +な +に +ぬ +ね +の +は +ば +ぱ +ひ +び +ぴ +ふ +ぶ +ぷ +へ +べ +ぺ +ほ +ぼ +ぽ +ま +み +む +め +も +ゃ +や +ゅ +ゆ +ょ +よ +ら +り +る +れ +ろ +わ +ゑ +を +ん +ゝ +ゞ +ァ +ア +ィ +イ +ゥ +ウ +ェ +エ +ォ +オ +カ +ガ +キ +ギ +ク +グ +ケ +ゲ +コ +ゴ +サ +ザ +シ +ジ +ス +ズ +セ +ゼ +ソ +ゾ +タ +ダ +チ +ヂ +ッ +ツ +ヅ +テ +デ +ト +ド +ナ +ニ +ヌ +ネ +ノ +ハ +バ +パ +ヒ +ビ +ピ +フ +ブ +プ +ヘ +ベ +ペ +ホ +ボ +ポ +マ +ミ +ム +メ +モ +ャ +ヤ +ュ +ユ +ョ +ヨ +ラ +リ +ル +レ +ロ +ワ +ヰ +ン +ヴ +ヵ +ヶ +・ +ー +㈱ +一 +丁 +七 +万 +丈 +三 +上 +下 +不 +与 +丑 +且 +世 +丘 +丙 +丞 +両 +並 +中 +串 +丸 +丹 +主 +丼 +丿 +乃 +久 +之 +乎 +乏 +乗 +乘 +乙 +九 +乞 +也 +乱 +乳 +乾 +亀 +了 +予 +争 +事 +二 +于 +互 +五 +井 +亘 +亙 +些 +亜 +亟 +亡 +交 +亥 +亦 +亨 +享 +京 +亭 +亮 +人 +什 +仁 +仇 +今 +介 +仍 +仏 +仔 +仕 +他 +仗 +付 +仙 +代 +令 +以 +仮 +仰 +仲 +件 +任 +企 +伊 +伍 +伎 +伏 +伐 +休 +会 +伝 +伯 +估 +伴 +伶 +伸 +伺 +似 +伽 +佃 +但 +位 +低 +住 +佐 +佑 +体 +何 +余 +佚 +佛 +作 +佩 +佳 +併 +佶 +使 +侈 +例 +侍 +侏 +侑 +侘 +供 +依 +侠 +価 +侮 +侯 +侵 +侶 +便 +係 +促 +俄 +俊 +俔 +俗 +俘 +保 +信 +俣 +俤 +修 +俯 +俳 +俵 +俸 +俺 +倉 +個 +倍 +倒 +候 +借 +倣 +値 +倫 +倭 +倶 +倹 +偃 +假 +偈 +偉 +偏 +偐 +偕 +停 +健 +側 +偵 +偶 +偽 +傀 +傅 +傍 +傑 +傘 +備 +催 +傭 +傲 +傳 +債 +傷 +傾 +僊 +働 +像 +僑 +僕 +僚 +僧 +僭 +僮 +儀 +億 +儇 +儒 +儛 +償 +儡 +優 +儲 +儺 +儼 +兀 +允 +元 +兄 +充 +兆 +先 +光 +克 +兌 +免 +兎 +児 +党 +兜 +入 +全 +八 +公 +六 +共 +兵 +其 +具 +典 +兼 +内 +円 +冊 +再 +冑 +冒 +冗 +写 +冠 +冤 +冥 +冨 +冬 +冲 +决 +冶 +冷 +准 +凉 +凋 +凌 +凍 +凛 +凝 +凞 +几 +凡 +処 +凪 +凰 +凱 +凶 +凸 +凹 +出 +函 +刀 +刃 +分 +切 +刈 +刊 +刎 +刑 +列 +初 +判 +別 +利 +刪 +到 +制 +刷 +券 +刹 +刺 +刻 +剃 +則 +削 +剋 +前 +剖 +剛 +剣 +剤 +剥 +剪 +副 +剰 +割 +創 +剽 +劇 +劉 +劔 +力 +功 +加 +劣 +助 +努 +劫 +劭 +励 +労 +効 +劾 +勃 +勅 +勇 +勉 +勒 +動 +勘 +務 +勝 +募 +勢 +勤 +勧 +勲 +勺 +勾 +勿 +匁 +匂 +包 +匏 +化 +北 +匙 +匝 +匠 +匡 +匣 +匯 +匲 +匹 +区 +医 +匿 +十 +千 +升 +午 +卉 +半 +卍 +卑 +卒 +卓 +協 +南 +単 +博 +卜 +占 +卦 +卯 +印 +危 +即 +却 +卵 +卸 +卿 +厄 +厚 +原 +厠 +厨 +厩 +厭 +厳 +去 +参 +又 +叉 +及 +友 +双 +反 +収 +叔 +取 +受 +叙 +叛 +叟 +叡 +叢 +口 +古 +句 +叩 +只 +叫 +召 +可 +台 +叱 +史 +右 +叶 +号 +司 +吃 +各 +合 +吉 +吊 +同 +名 +后 +吏 +吐 +向 +君 +吝 +吟 +吠 +否 +含 +吸 +吹 +吻 +吽 +吾 +呂 +呆 +呈 +呉 +告 +呑 +周 +呪 +呰 +味 +呼 +命 +咀 +咄 +咋 +和 +咒 +咫 +咲 +咳 +咸 +哀 +品 +哇 +哉 +員 +哨 +哩 +哭 +哲 +哺 +唄 +唆 +唇 +唐 +唖 +唯 +唱 +唳 +唸 +唾 +啄 +商 +問 +啓 +啼 +善 +喋 +喚 +喜 +喝 +喧 +喩 +喪 +喫 +喬 +單 +喰 +営 +嗅 +嗇 +嗔 +嗚 +嗜 +嗣 +嘆 +嘉 +嘗 +嘘 +嘩 +嘯 +嘱 +嘲 +嘴 +噂 +噌 +噛 +器 +噴 +噺 +嚆 +嚢 +囀 +囃 +囉 +囚 +四 +回 +因 +団 +困 +囲 +図 +固 +国 +圀 +圃 +國 +圏 +園 +圓 +團 +圜 +土 +圧 +在 +圭 +地 +址 +坂 +均 +坊 +坐 +坑 +坡 +坤 +坦 +坪 +垂 +型 +垢 +垣 +埃 +埋 +城 +埒 +埔 +域 +埠 +埴 +埵 +執 +培 +基 +埼 +堀 +堂 +堅 +堆 +堕 +堤 +堪 +堯 +堰 +報 +場 +堵 +堺 +塀 +塁 +塊 +塑 +塔 +塗 +塘 +塙 +塚 +塞 +塩 +填 +塵 +塾 +境 +墉 +墓 +増 +墜 +墟 +墨 +墳 +墺 +墻 +墾 +壁 +壇 +壊 +壌 +壕 +士 +壬 +壮 +声 +壱 +売 +壷 +壹 +壺 +壽 +変 +夏 +夕 +外 +夙 +多 +夜 +夢 +夥 +大 +天 +太 +夫 +夬 +夭 +央 +失 +夷 +夾 +奄 +奇 +奈 +奉 +奎 +奏 +契 +奔 +奕 +套 +奘 +奠 +奢 +奥 +奨 +奪 +奮 +女 +奴 +奸 +好 +如 +妃 +妄 +妊 +妍 +妓 +妖 +妙 +妥 +妨 +妬 +妲 +妹 +妻 +妾 +姉 +始 +姐 +姓 +委 +姚 +姜 +姞 +姥 +姦 +姨 +姪 +姫 +姶 +姻 +姿 +威 +娑 +娘 +娟 +娠 +娩 +娯 +娼 +婆 +婉 +婚 +婢 +婦 +婬 +婿 +媄 +媒 +媓 +媚 +媛 +媞 +媽 +嫁 +嫄 +嫉 +嫌 +嫐 +嫗 +嫡 +嬉 +嬌 +嬢 +嬪 +嬬 +嬾 +孁 +子 +孔 +字 +存 +孚 +孝 +孟 +季 +孤 +学 +孫 +孵 +學 +宅 +宇 +守 +安 +宋 +完 +宍 +宏 +宕 +宗 +官 +宙 +定 +宛 +宜 +宝 +実 +客 +宣 +室 +宥 +宮 +宰 +害 +宴 +宵 +家 +宸 +容 +宿 +寂 +寄 +寅 +密 +寇 +富 +寒 +寓 +寔 +寛 +寝 +察 +寡 +實 +寧 +審 +寮 +寵 +寶 +寸 +寺 +対 +寿 +封 +専 +射 +将 +尉 +尊 +尋 +對 +導 
+小 +少 +尖 +尚 +尤 +尪 +尭 +就 +尹 +尺 +尻 +尼 +尽 +尾 +尿 +局 +居 +屈 +届 +屋 +屍 +屎 +屏 +屑 +屓 +展 +属 +屠 +層 +履 +屯 +山 +岐 +岑 +岡 +岩 +岫 +岬 +岳 +岷 +岸 +峠 +峡 +峨 +峯 +峰 +島 +峻 +崇 +崋 +崎 +崑 +崖 +崗 +崛 +崩 +嵌 +嵐 +嵩 +嵯 +嶂 +嶋 +嶠 +嶺 +嶼 +嶽 +巀 +巌 +巒 +巖 +川 +州 +巡 +巣 +工 +左 +巧 +巨 +巫 +差 +己 +巳 +巴 +巷 +巻 +巽 +巾 +市 +布 +帆 +希 +帖 +帚 +帛 +帝 +帥 +師 +席 +帯 +帰 +帳 +帷 +常 +帽 +幄 +幅 +幇 +幌 +幔 +幕 +幟 +幡 +幢 +幣 +干 +平 +年 +并 +幸 +幹 +幻 +幼 +幽 +幾 +庁 +広 +庄 +庇 +床 +序 +底 +庖 +店 +庚 +府 +度 +座 +庫 +庭 +庵 +庶 +康 +庸 +廂 +廃 +廉 +廊 +廓 +廟 +廠 +廣 +廬 +延 +廷 +建 +廻 +廼 +廿 +弁 +弄 +弉 +弊 +弌 +式 +弐 +弓 +弔 +引 +弖 +弗 +弘 +弛 +弟 +弥 +弦 +弧 +弱 +張 +強 +弼 +弾 +彈 +彊 +彌 +彎 +当 +彗 +彙 +彝 +形 +彦 +彩 +彫 +彬 +彭 +彰 +影 +彷 +役 +彼 +往 +征 +徂 +径 +待 +律 +後 +徐 +徑 +徒 +従 +得 +徠 +御 +徧 +徨 +復 +循 +徭 +微 +徳 +徴 +德 +徹 +徽 +心 +必 +忉 +忌 +忍 +志 +忘 +忙 +応 +忠 +快 +忯 +念 +忻 +忽 +忿 +怒 +怖 +思 +怠 +怡 +急 +性 +怨 +怪 +怯 +恂 +恋 +恐 +恒 +恕 +恣 +恤 +恥 +恨 +恩 +恬 +恭 +息 +恵 +悉 +悌 +悍 +悔 +悟 +悠 +患 +悦 +悩 +悪 +悲 +悼 +情 +惇 +惑 +惚 +惜 +惟 +惠 +惣 +惧 +惨 +惰 +想 +惹 +惺 +愈 +愉 +愍 +意 +愔 +愚 +愛 +感 +愷 +愿 +慈 +態 +慌 +慎 +慕 +慢 +慣 +慧 +慨 +慮 +慰 +慶 +憂 +憎 +憐 +憑 +憙 +憤 +憧 +憩 +憬 +憲 +憶 +憾 +懇 +應 +懌 +懐 +懲 +懸 +懺 +懽 +懿 +戈 +戊 +戌 +戎 +成 +我 +戒 +戔 +或 +戚 +戟 +戦 +截 +戮 +戯 +戴 +戸 +戻 +房 +所 +扁 +扇 +扈 +扉 +手 +才 +打 +払 +托 +扮 +扱 +扶 +批 +承 +技 +抄 +把 +抑 +抓 +投 +抗 +折 +抜 +択 +披 +抱 +抵 +抹 +押 +抽 +担 +拇 +拈 +拉 +拍 +拏 +拐 +拒 +拓 +拘 +拙 +招 +拝 +拠 +拡 +括 +拭 +拳 +拵 +拶 +拾 +拿 +持 +挂 +指 +按 +挑 +挙 +挟 +挨 +振 +挺 +挽 +挿 +捉 +捕 +捗 +捜 +捧 +捨 +据 +捺 +捻 +掃 +掄 +授 +掌 +排 +掖 +掘 +掛 +掟 +採 +探 +掣 +接 +控 +推 +掩 +措 +掬 +掲 +掴 +掻 +掾 +揃 +揄 +揆 +揉 +描 +提 +揖 +揚 +換 +握 +揮 +援 +揶 +揺 +損 +搦 +搬 +搭 +携 +搾 +摂 +摘 +摩 +摸 +摺 +撃 +撒 +撞 +撤 +撥 +撫 +播 +撮 +撰 +撲 +撹 +擁 +操 +擔 +擦 +擬 +擾 +攘 +攝 +攣 +支 +收 +改 +攻 +放 +政 +故 +敏 +救 +敗 +教 +敢 +散 +敦 +敬 +数 +整 +敵 +敷 +斂 +文 +斉 +斎 +斐 +斑 +斗 +料 +斜 +斟 +斤 +斥 +斧 +斬 +断 +斯 +新 +方 +於 +施 +旁 +旅 +旋 +旌 +族 +旗 +旛 +无 +旡 +既 +日 +旦 +旧 +旨 +早 +旬 +旭 +旺 +旻 +昂 +昆 +昇 +昉 +昌 +明 +昏 +易 +昔 +星 +映 +春 +昧 +昨 +昪 +昭 +是 +昵 +昼 +晁 +時 +晃 +晋 +晏 +晒 +晟 +晦 +晧 +晩 +普 +景 +晴 +晶 +智 +暁 +暇 +暈 +暉 +暑 +暖 +暗 +暘 +暢 +暦 +暫 +暮 +暲 +暴 +暹 +暾 +曄 +曇 +曉 +曖 +曙 +曜 +曝 +曠 +曰 +曲 +曳 +更 +書 +曹 +曼 +曽 +曾 +替 +最 +會 +月 +有 +朋 +服 +朏 +朔 +朕 +朗 +望 +朝 +期 +朧 +木 +未 +末 +本 +札 +朱 +朴 +机 +朽 +杁 +杉 +李 +杏 +材 +村 +杓 +杖 +杜 +杞 +束 +条 +杢 +杣 +来 +杭 +杮 +杯 +東 +杲 +杵 +杷 +杼 +松 +板 +枅 +枇 +析 +枓 +枕 +林 +枚 +果 +枝 +枠 +枡 +枢 +枯 +枳 +架 +柄 +柊 +柏 +某 +柑 +染 +柔 +柘 +柚 +柯 +柱 +柳 +柴 +柵 +査 +柾 +柿 +栂 +栃 +栄 +栖 +栗 +校 +株 +栲 +栴 +核 +根 +栻 +格 +栽 +桁 +桂 +桃 +框 +案 +桐 +桑 +桓 +桔 +桜 +桝 +桟 +桧 +桴 +桶 +桾 +梁 +梅 +梆 +梓 +梔 +梗 +梛 +條 +梟 +梢 +梧 +梨 +械 +梱 +梲 +梵 +梶 +棄 +棋 +棒 +棗 +棘 +棚 +棟 +棠 +森 +棲 +棹 +棺 +椀 +椅 +椋 +植 +椎 +椏 +椒 +椙 +検 +椥 +椹 +椿 +楊 +楓 +楕 +楚 +楞 +楠 +楡 +楢 +楨 +楪 +楫 +業 +楮 +楯 +楳 +極 +楷 +楼 +楽 +概 +榊 +榎 +榕 +榛 +榜 +榮 +榱 +榴 +槃 +槇 +槊 +構 +槌 +槍 +槐 +様 +槙 +槻 +槽 +槿 +樂 +樋 +樓 +樗 +標 +樟 +模 +権 +横 +樫 +樵 +樹 +樺 +樽 +橇 +橋 +橘 +機 +橿 +檀 +檄 +檎 +檐 +檗 +檜 +檣 +檥 +檬 +檮 +檸 +檻 +櫃 +櫓 +櫛 +櫟 +櫨 +櫻 +欄 +欅 +欠 +次 +欣 +欧 +欲 +欺 +欽 +款 +歌 +歎 +歓 +止 +正 +此 +武 +歩 +歪 +歯 +歳 +歴 +死 +殆 +殉 +殊 +残 +殖 +殯 +殴 +段 +殷 +殺 +殻 +殿 +毀 +毅 +母 +毎 +毒 +比 +毘 +毛 +毫 +毬 +氈 +氏 +民 +気 +水 +氷 +永 +氾 +汀 +汁 +求 +汎 +汐 +汗 +汚 +汝 +江 +池 +汪 +汰 +汲 +決 +汽 +沂 +沃 +沅 +沆 +沈 +沌 +沐 +沓 +沖 +沙 +没 +沢 +沱 +河 +沸 +油 +治 +沼 +沽 +沿 +況 +泉 +泊 +泌 +法 +泗 +泡 +波 +泣 +泥 +注 +泯 +泰 +泳 +洋 +洒 +洗 +洛 +洞 +津 +洩 +洪 +洲 +洸 +洹 +活 +洽 +派 +流 +浄 +浅 +浙 +浚 +浜 +浣 +浦 +浩 +浪 +浮 +浴 +海 +浸 +涅 +消 +涌 +涙 +涛 +涯 +液 +涵 +涼 +淀 +淄 +淆 +淇 +淋 +淑 +淘 +淡 +淤 +淨 +淫 +深 +淳 +淵 +混 +淹 +添 +清 +済 +渉 +渋 +渓 +渕 +渚 +減 +渟 +渠 +渡 +渤 +渥 +渦 +温 +渫 +測 +港 +游 +渾 +湊 +湖 +湘 +湛 +湧 +湫 +湯 +湾 +湿 +満 +源 +準 +溜 +溝 +溢 +溥 +溪 +溶 +溺 +滄 +滅 +滋 +滌 +滑 +滕 +滝 +滞 +滴 +滸 +滹 +滿 +漁 +漂 +漆 +漉 +漏 +漑 +演 +漕 +漠 +漢 +漣 +漫 +漬 +漱 +漸 +漿 +潅 +潔 +潙 +潜 +潟 +潤 +潭 +潮 +潰 +潴 +澁 +澂 +澄 +澎 +澗 +澤 +澪 +澱 +澳 +激 +濁 +濃 +濟 +濠 +濡 +濤 +濫 +濯 +濱 +濾 +瀉 +瀋 +瀑 +瀕 +瀞 +瀟 +瀧 +瀬 +瀾 +灌 +灑 +灘 +火 +灯 +灰 +灸 +災 +炉 +炊 +炎 +炒 +炭 +炮 +炷 +点 +為 +烈 +烏 +烙 +烝 +烹 +焔 +焙 +焚 +無 +焦 +然 +焼 +煇 +煉 +煌 +煎 +煕 +煙 +煤 +煥 +照 +煩 +煬 +煮 +煽 +熈 +熊 +熙 +熟 +熨 +熱 +熹 +熾 +燃 +燈 +燎 +燔 +燕 +燗 +燥 +燭 +燻 +爆 +爐 +爪 +爬 +爲 +爵 +父 +爺 +爼 +爽 +爾 +片 +版 +牌 +牒 +牘 
+牙 +牛 +牝 +牟 +牡 +牢 +牧 +物 +牲 +特 +牽 +犂 +犠 +犬 +犯 +状 +狂 +狄 +狐 +狗 +狙 +狛 +狡 +狩 +独 +狭 +狷 +狸 +狼 +猊 +猛 +猟 +猥 +猨 +猩 +猪 +猫 +献 +猴 +猶 +猷 +猾 +猿 +獄 +獅 +獏 +獣 +獲 +玄 +玅 +率 +玉 +王 +玖 +玩 +玲 +珀 +珂 +珈 +珉 +珊 +珍 +珎 +珞 +珠 +珣 +珥 +珪 +班 +現 +球 +理 +琉 +琢 +琥 +琦 +琮 +琲 +琳 +琴 +琵 +琶 +瑁 +瑋 +瑙 +瑚 +瑛 +瑜 +瑞 +瑠 +瑤 +瑩 +瑪 +瑳 +瑾 +璃 +璋 +璜 +璞 +璧 +璨 +環 +璵 +璽 +璿 +瓊 +瓔 +瓜 +瓢 +瓦 +瓶 +甍 +甑 +甕 +甘 +甚 +甞 +生 +産 +甥 +用 +甫 +田 +由 +甲 +申 +男 +町 +画 +界 +畏 +畑 +畔 +留 +畜 +畝 +畠 +畢 +略 +番 +異 +畳 +當 +畷 +畸 +畺 +畿 +疆 +疇 +疋 +疎 +疏 +疑 +疫 +疱 +疲 +疹 +疼 +疾 +病 +症 +痒 +痔 +痕 +痘 +痙 +痛 +痢 +痩 +痴 +痺 +瘍 +瘡 +瘧 +療 +癇 +癌 +癒 +癖 +癡 +癪 +発 +登 +白 +百 +的 +皆 +皇 +皋 +皐 +皓 +皮 +皺 +皿 +盂 +盃 +盆 +盈 +益 +盒 +盗 +盛 +盞 +盟 +盡 +監 +盤 +盥 +盧 +目 +盲 +直 +相 +盾 +省 +眉 +看 +県 +眞 +真 +眠 +眷 +眺 +眼 +着 +睡 +督 +睦 +睨 +睿 +瞋 +瞑 +瞞 +瞬 +瞭 +瞰 +瞳 +瞻 +瞼 +瞿 +矍 +矛 +矜 +矢 +知 +矧 +矩 +短 +矮 +矯 +石 +砂 +砌 +研 +砕 +砥 +砦 +砧 +砲 +破 +砺 +硝 +硫 +硬 +硯 +碁 +碇 +碌 +碑 +碓 +碕 +碗 +碣 +碧 +碩 +確 +碾 +磁 +磐 +磔 +磧 +磨 +磬 +磯 +礁 +礎 +礒 +礙 +礫 +礬 +示 +礼 +社 +祀 +祁 +祇 +祈 +祉 +祐 +祓 +祕 +祖 +祗 +祚 +祝 +神 +祟 +祠 +祢 +祥 +票 +祭 +祷 +祺 +禁 +禄 +禅 +禊 +禍 +禎 +福 +禔 +禖 +禛 +禦 +禧 +禮 +禰 +禹 +禽 +禿 +秀 +私 +秋 +科 +秒 +秘 +租 +秤 +秦 +秩 +称 +移 +稀 +程 +税 +稔 +稗 +稙 +稚 +稜 +稠 +種 +稱 +稲 +稷 +稻 +稼 +稽 +稿 +穀 +穂 +穆 +積 +穎 +穏 +穗 +穜 +穢 +穣 +穫 +穴 +究 +空 +突 +窃 +窄 +窒 +窓 +窟 +窠 +窩 +窪 +窮 +窯 +竃 +竄 +竈 +立 +站 +竜 +竝 +竟 +章 +童 +竪 +竭 +端 +竴 +競 +竹 +竺 +竽 +竿 +笄 +笈 +笏 +笑 +笙 +笛 +笞 +笠 +笥 +符 +第 +笹 +筅 +筆 +筇 +筈 +等 +筋 +筌 +筍 +筏 +筐 +筑 +筒 +答 +策 +筝 +筥 +筧 +筬 +筮 +筯 +筰 +筵 +箆 +箇 +箋 +箏 +箒 +箔 +箕 +算 +箙 +箜 +管 +箪 +箭 +箱 +箸 +節 +篁 +範 +篆 +篇 +築 +篋 +篌 +篝 +篠 +篤 +篥 +篦 +篩 +篭 +篳 +篷 +簀 +簒 +簡 +簧 +簪 +簫 +簺 +簾 +簿 +籀 +籃 +籌 +籍 +籐 +籟 +籠 +籤 +籬 +米 +籾 +粂 +粉 +粋 +粒 +粕 +粗 +粘 +粛 +粟 +粥 +粧 +粮 +粳 +精 +糊 +糖 +糜 +糞 +糟 +糠 +糧 +糯 +糸 +糺 +系 +糾 +紀 +約 +紅 +紋 +納 +紐 +純 +紗 +紘 +紙 +級 +紛 +素 +紡 +索 +紫 +紬 +累 +細 +紳 +紵 +紹 +紺 +絁 +終 +絃 +組 +絅 +経 +結 +絖 +絞 +絡 +絣 +給 +統 +絲 +絵 +絶 +絹 +絽 +綏 +經 +継 +続 +綜 +綟 +綬 +維 +綱 +網 +綴 +綸 +綺 +綽 +綾 +綿 +緊 +緋 +総 +緑 +緒 +線 +締 +緥 +編 +緩 +緬 +緯 +練 +緻 +縁 +縄 +縅 +縒 +縛 +縞 +縢 +縣 +縦 +縫 +縮 +縹 +總 +績 +繁 +繊 +繋 +繍 +織 +繕 +繝 +繦 +繧 +繰 +繹 +繼 +纂 +纈 +纏 +纐 +纒 +纛 +缶 +罔 +罠 +罧 +罪 +置 +罰 +署 +罵 +罷 +罹 +羂 +羅 +羆 +羇 +羈 +羊 +羌 +美 +群 +羨 +義 +羯 +羲 +羹 +羽 +翁 +翅 +翌 +習 +翔 +翛 +翠 +翡 +翫 +翰 +翺 +翻 +翼 +耀 +老 +考 +者 +耆 +而 +耐 +耕 +耗 +耨 +耳 +耶 +耽 +聊 +聖 +聘 +聚 +聞 +聟 +聡 +聨 +聯 +聰 +聲 +聴 +職 +聾 +肄 +肆 +肇 +肉 +肋 +肌 +肖 +肘 +肛 +肝 +股 +肢 +肥 +肩 +肪 +肯 +肱 +育 +肴 +肺 +胃 +胆 +背 +胎 +胖 +胚 +胝 +胞 +胡 +胤 +胱 +胴 +胸 +能 +脂 +脅 +脆 +脇 +脈 +脊 +脚 +脛 +脩 +脱 +脳 +腋 +腎 +腐 +腑 +腔 +腕 +腫 +腰 +腱 +腸 +腹 +腺 +腿 +膀 +膏 +膚 +膜 +膝 +膠 +膣 +膨 +膩 +膳 +膵 +膾 +膿 +臂 +臆 +臈 +臍 +臓 +臘 +臚 +臣 +臥 +臨 +自 +臭 +至 +致 +臺 +臼 +舂 +舅 +與 +興 +舌 +舍 +舎 +舒 +舖 +舗 +舘 +舜 +舞 +舟 +舩 +航 +般 +舳 +舶 +船 +艇 +艘 +艦 +艮 +良 +色 +艶 +芋 +芒 +芙 +芝 +芥 +芦 +芬 +芭 +芯 +花 +芳 +芸 +芹 +芻 +芽 +芿 +苅 +苑 +苔 +苗 +苛 +苞 +苡 +若 +苦 +苧 +苫 +英 +苴 +苻 +茂 +范 +茄 +茅 +茎 +茗 +茘 +茜 +茨 +茲 +茵 +茶 +茸 +茹 +草 +荊 +荏 +荒 +荘 +荷 +荻 +荼 +莞 +莪 +莫 +莬 +莱 +莵 +莽 +菅 +菊 +菌 +菓 +菖 +菘 +菜 +菟 +菩 +菫 +華 +菱 +菴 +萄 +萊 +萌 +萍 +萎 +萠 +萩 +萬 +萱 +落 +葉 +著 +葛 +葡 +董 +葦 +葩 +葬 +葭 +葱 +葵 +葺 +蒋 +蒐 +蒔 +蒙 +蒟 +蒡 +蒲 +蒸 +蒻 +蒼 +蒿 +蓄 +蓆 +蓉 +蓋 +蓑 +蓬 +蓮 +蓼 +蔀 +蔑 +蔓 +蔚 +蔡 +蔦 +蔬 +蔭 +蔵 +蔽 +蕃 +蕉 +蕊 +蕎 +蕨 +蕩 +蕪 +蕭 +蕾 +薄 +薇 +薊 +薔 +薗 +薙 +薛 +薦 +薨 +薩 +薪 +薫 +薬 +薭 +薮 +藁 +藉 +藍 +藏 +藐 +藝 +藤 +藩 +藪 +藷 +藹 +藺 +藻 +蘂 +蘆 +蘇 +蘊 +蘭 +虎 +虐 +虔 +虚 +虜 +虞 +號 +虫 +虹 +虻 +蚊 +蚕 +蛇 +蛉 +蛍 +蛎 +蛙 +蛛 +蛟 +蛤 +蛭 +蛮 +蛸 +蛹 +蛾 +蜀 +蜂 +蜃 +蜆 +蜊 +蜘 +蜜 +蜷 +蜻 +蝉 +蝋 +蝕 +蝙 +蝠 +蝦 +蝶 +蝿 +螂 +融 +螣 +螺 +蟄 +蟇 +蟠 +蟷 +蟹 +蟻 +蠢 +蠣 +血 +衆 +行 +衍 +衒 +術 +街 +衙 +衛 +衝 +衞 +衡 +衢 +衣 +表 +衫 +衰 +衵 +衷 +衽 +衾 +衿 +袁 +袈 +袋 +袍 +袒 +袖 +袙 +袞 +袢 +被 +袰 +袱 +袴 +袷 +袿 +裁 +裂 +裃 +装 +裏 +裔 +裕 +裘 +裙 +補 +裟 +裡 +裲 +裳 +裴 +裸 +裹 +製 +裾 +褂 +褄 +複 +褌 +褐 +褒 +褥 +褪 +褶 +褻 +襄 +襖 +襞 +襟 +襠 +襦 +襪 +襲 +襴 +襷 +西 +要 +覆 +覇 +覈 +見 +規 +視 +覗 +覚 +覧 +親 +覲 +観 +覺 +觀 +角 +解 +触 +言 +訂 +計 +討 +訓 +託 +記 +訛 +訟 +訢 +訥 +訪 +設 +許 +訳 +訴 +訶 +診 +註 +証 +詐 +詔 +評 +詛 +詞 +詠 +詢 +詣 +試 +詩 +詫 +詮 +詰 +話 +該 +詳 +誄 +誅 +誇 +誉 +誌 +認 +誓 +誕 +誘 +語 +誠 +誡 +誣 +誤 +誥 +誦 +説 +読 +誰 +課 +誼 +誾 +調 +談 +請 +諌 +諍 +諏 +諒 +論 +諚 +諜 +諟 +諡 +諦 +諧 
+諫 +諭 +諮 +諱 +諶 +諷 +諸 +諺 +諾 +謀 +謄 +謌 +謎 +謗 +謙 +謚 +講 +謝 +謡 +謫 +謬 +謹 +證 +識 +譚 +譛 +譜 +警 +譬 +譯 +議 +譲 +譴 +護 +讀 +讃 +讐 +讒 +谷 +谿 +豅 +豆 +豊 +豎 +豐 +豚 +象 +豪 +豫 +豹 +貌 +貝 +貞 +負 +財 +貢 +貧 +貨 +販 +貪 +貫 +責 +貯 +貰 +貴 +買 +貸 +費 +貼 +貿 +賀 +賁 +賂 +賃 +賄 +資 +賈 +賊 +賎 +賑 +賓 +賛 +賜 +賞 +賠 +賢 +賣 +賤 +賦 +質 +賭 +購 +賽 +贄 +贅 +贈 +贋 +贔 +贖 +赤 +赦 +走 +赴 +起 +超 +越 +趙 +趣 +足 +趺 +趾 +跋 +跏 +距 +跡 +跨 +跪 +路 +跳 +践 +踊 +踏 +踐 +踞 +踪 +踵 +蹄 +蹉 +蹊 +蹟 +蹲 +蹴 +躅 +躇 +躊 +躍 +躑 +躙 +躪 +身 +躬 +躯 +躰 +車 +軋 +軌 +軍 +軒 +軟 +転 +軸 +軻 +軽 +軾 +較 +載 +輌 +輔 +輜 +輝 +輦 +輩 +輪 +輯 +輸 +輿 +轄 +轍 +轟 +轢 +辛 +辞 +辟 +辥 +辦 +辨 +辰 +辱 +農 +辺 +辻 +込 +迂 +迅 +迎 +近 +返 +迢 +迦 +迪 +迫 +迭 +述 +迷 +迹 +追 +退 +送 +逃 +逅 +逆 +逍 +透 +逐 +逓 +途 +逕 +逗 +這 +通 +逝 +逞 +速 +造 +逢 +連 +逮 +週 +進 +逸 +逼 +遁 +遂 +遅 +遇 +遊 +運 +遍 +過 +遐 +道 +達 +違 +遙 +遜 +遠 +遡 +遣 +遥 +適 +遭 +遮 +遯 +遵 +遷 +選 +遺 +遼 +避 +邀 +邁 +邂 +邃 +還 +邇 +邉 +邊 +邑 +那 +邦 +邨 +邪 +邯 +邵 +邸 +郁 +郊 +郎 +郡 +郢 +部 +郭 +郴 +郵 +郷 +都 +鄂 +鄙 +鄭 +鄰 +鄲 +酉 +酋 +酌 +配 +酎 +酒 +酔 +酢 +酥 +酪 +酬 +酵 +酷 +酸 +醍 +醐 +醒 +醗 +醜 +醤 +醪 +醵 +醸 +采 +釈 +釉 +釋 +里 +重 +野 +量 +釐 +金 +釘 +釜 +針 +釣 +釧 +釿 +鈍 +鈎 +鈐 +鈔 +鈞 +鈦 +鈴 +鈷 +鈸 +鈿 +鉄 +鉇 +鉉 +鉋 +鉛 +鉢 +鉤 +鉦 +鉱 +鉾 +銀 +銃 +銅 +銈 +銑 +銕 +銘 +銚 +銜 +銭 +鋏 +鋒 +鋤 +鋭 +鋲 +鋳 +鋸 +鋺 +鋼 +錆 +錍 +錐 +錘 +錠 +錣 +錦 +錫 +錬 +錯 +録 +錵 +鍋 +鍍 +鍑 +鍔 +鍛 +鍬 +鍮 +鍵 +鍼 +鍾 +鎌 +鎖 +鎗 +鎚 +鎧 +鎬 +鎮 +鎰 +鎹 +鏃 +鏑 +鏡 +鐃 +鐇 +鐐 +鐔 +鐘 +鐙 +鐚 +鐡 +鐵 +鐸 +鑁 +鑊 +鑑 +鑒 +鑚 +鑠 +鑢 +鑰 +鑵 +鑷 +鑼 +鑽 +鑿 +長 +門 +閃 +閇 +閉 +開 +閏 +閑 +間 +閔 +閘 +関 +閣 +閤 +閥 +閦 +閨 +閬 +閲 +閻 +閼 +閾 +闇 +闍 +闔 +闕 +闘 +關 +闡 +闢 +闥 +阜 +阪 +阮 +阯 +防 +阻 +阿 +陀 +陂 +附 +陌 +降 +限 +陛 +陞 +院 +陣 +除 +陥 +陪 +陬 +陰 +陳 +陵 +陶 +陸 +険 +陽 +隅 +隆 +隈 +隊 +隋 +階 +随 +隔 +際 +障 +隠 +隣 +隧 +隷 +隻 +隼 +雀 +雁 +雄 +雅 +集 +雇 +雉 +雊 +雋 +雌 +雍 +雑 +雖 +雙 +雛 +離 +難 +雨 +雪 +雫 +雰 +雲 +零 +雷 +雹 +電 +需 +震 +霊 +霍 +霖 +霜 +霞 +霧 +霰 +露 +靈 +青 +靖 +静 +靜 +非 +面 +革 +靫 +靭 +靱 +靴 +靺 +鞁 +鞄 +鞆 +鞋 +鞍 +鞏 +鞘 +鞠 +鞨 +鞭 +韋 +韓 +韜 +韮 +音 +韶 +韻 +響 +頁 +頂 +頃 +項 +順 +須 +頌 +預 +頑 +頒 +頓 +領 +頚 +頬 +頭 +頴 +頸 +頻 +頼 +顆 +題 +額 +顎 +顔 +顕 +顗 +願 +顛 +類 +顧 +顯 +風 +飛 +食 +飢 +飩 +飫 +飯 +飲 +飴 +飼 +飽 +飾 +餃 +餅 +餉 +養 +餌 +餐 +餓 +餘 +餝 +餡 +館 +饂 +饅 +饉 +饋 +饌 +饒 +饗 +首 +馗 +香 +馨 +馬 +馳 +馴 +駄 +駅 +駆 +駈 +駐 +駒 +駕 +駝 +駿 +騁 +騎 +騏 +騒 +験 +騙 +騨 +騰 +驕 +驚 +驛 +驢 +骨 +骸 +髄 +體 +高 +髙 +髢 +髪 +髭 +髮 +髷 +髻 +鬘 +鬚 +鬢 +鬨 +鬯 +鬱 +鬼 +魁 +魂 +魄 +魅 +魏 +魔 +魚 +魯 +鮎 +鮑 +鮒 +鮪 +鮫 +鮭 +鮮 +鯉 +鯔 +鯖 +鯛 +鯨 +鯰 +鯱 +鰐 +鰒 +鰭 +鰯 +鰰 +鰹 +鰻 +鱈 +鱒 +鱗 +鱧 +鳥 +鳩 +鳰 +鳳 +鳴 +鳶 +鴈 +鴉 +鴎 +鴛 +鴟 +鴦 +鴨 +鴫 +鴻 +鵄 +鵜 +鵞 +鵡 +鵬 +鵲 +鵺 +鶉 +鶏 +鶯 +鶴 +鷄 +鷙 +鷲 +鷹 +鷺 +鸚 +鸞 +鹸 +鹽 +鹿 +麁 +麒 +麓 +麗 +麝 +麞 +麟 +麦 +麩 +麹 +麺 +麻 +麾 +麿 +黄 +黌 +黍 +黒 +黙 +黛 +黠 +鼈 +鼉 +鼎 +鼓 +鼠 +鼻 +齊 +齋 +齟 +齢 +齬 +龍 +龕 +龗 +! +# +% +& +( +) ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; += +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +R +S +T +U +V +W +X +Z +a +c +d +e +f +h +i +j +k +l +m +n +o +p +r +s +t +u +y +z +~ +・ + diff --git a/modules/pytorchocr/utils/dict/ka_dict.txt b/modules/pytorchocr/utils/dict/ka_dict.txt new file mode 100644 index 0000000..33d605c --- /dev/null +++ b/modules/pytorchocr/utils/dict/ka_dict.txt @@ -0,0 +1,153 @@ +k +a +_ +i +m +g +/ +1 +2 +I +L +S +V +R +C +0 +v +l +6 +4 +8 +. +j +p +ಗ +ು +ಣ +ಪ +ಡ +ಿ +ಸ +ಲ +ಾ +ದ +್ +7 +5 +3 +ವ +ಷ +ಬ +ಹ +ೆ +9 +ಅ +ಳ +ನ +ರ +ಉ +ಕ +ಎ +ೇ +ಂ +ೈ +ೊ +ೀ +ಯ +ೋ +ತ +ಶ +ಭ +ಧ +ಚ +ಜ +ೂ +ಮ +ಒ +ೃ +ಥ +ಇ +ಟ +ಖ +ಆ +ಞ +ಫ +- +ಢ +ಊ +ಓ +ಐ +ಃ +ಘ +ಝ +ೌ +ಠ +ಛ +ಔ +ಏ +ಈ +ಋ +೨ +೦ +೧ +೮ +೯ +೪ +, +೫ +೭ +೩ +೬ +ಙ +s +c +e +n +w +o +u +t +d +E +A +T +B +Z +N +G +O +q +z +r +x +P +K +M +J +U +D +f +F +h +b +W +Y +y +H +X +Q +' +# +& +! +@ +$ +: +% +é +É +( +? ++ + diff --git a/modules/pytorchocr/utils/dict/kn_dict.txt b/modules/pytorchocr/utils/dict/kn_dict.txt new file mode 100644 index 0000000..33d605c --- /dev/null +++ b/modules/pytorchocr/utils/dict/kn_dict.txt @@ -0,0 +1,153 @@ +k +a +_ +i +m +g +/ +1 +2 +I +L +S +V +R +C +0 +v +l +6 +4 +8 +. 
+j +p +ಗ +ು +ಣ +ಪ +ಡ +ಿ +ಸ +ಲ +ಾ +ದ +್ +7 +5 +3 +ವ +ಷ +ಬ +ಹ +ೆ +9 +ಅ +ಳ +ನ +ರ +ಉ +ಕ +ಎ +ೇ +ಂ +ೈ +ೊ +ೀ +ಯ +ೋ +ತ +ಶ +ಭ +ಧ +ಚ +ಜ +ೂ +ಮ +ಒ +ೃ +ಥ +ಇ +ಟ +ಖ +ಆ +ಞ +ಫ +- +ಢ +ಊ +ಓ +ಐ +ಃ +ಘ +ಝ +ೌ +ಠ +ಛ +ಔ +ಏ +ಈ +ಋ +೨ +೦ +೧ +೮ +೯ +೪ +, +೫ +೭ +೩ +೬ +ಙ +s +c +e +n +w +o +u +t +d +E +A +T +B +Z +N +G +O +q +z +r +x +P +K +M +J +U +D +f +F +h +b +W +Y +y +H +X +Q +' +# +& +! +@ +$ +: +% +é +É +( +? ++ + diff --git a/modules/pytorchocr/utils/dict/korean_dict.txt b/modules/pytorchocr/utils/dict/korean_dict.txt new file mode 100644 index 0000000..a13899f --- /dev/null +++ b/modules/pytorchocr/utils/dict/korean_dict.txt @@ -0,0 +1,3688 @@ +! +" +# +$ +% +& +' +* ++ +- +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +© +° +² +½ +Á +Ä +Å +Ç +É +Í +Î +Ó +Ö +× +Ü +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +ì +í +î +ï +ð +ñ +ò +ó +ô +õ +ö +ø +ú +û +ü +ý +ā +ă +ą +ć +Č +č +đ +ē +ė +ę +ě +ğ +ī +İ +ı +Ł +ł +ń +ň +ō +ř +Ş +ş +Š +š +ţ +ū +ź +ż +Ž +ž +Ș +ș +Α +Δ +α +λ +φ +Г +О +а +в +л +о +р +с +т +я +​ +’ +“ +” +→ +∇ +∼ +「 +」 +ア +カ +グ +ニ +ラ +ン +ㄱ +ㄴ +ㄷ +ㄸ +ㄹ +ㅂ +ㅅ +ㅆ +ㅇ +ㅈ +ㅊ +ㅋ +ㅌ +ㅎ +ㅓ +ㅜ +ㅣ +一 +丁 +七 +三 +上 +下 +不 +丑 +世 +丘 +丞 +中 +丸 +丹 +主 +乃 +久 +之 +乎 +乘 +九 +也 +乳 +乾 +事 +二 +云 +互 +五 +井 +亞 +亡 +交 +亥 +亨 +享 +京 +亭 +人 +仁 +今 +他 +仙 +代 +令 +以 +仰 +仲 +件 +任 +企 +伊 +伍 +伎 +伏 +伐 +休 +伯 +伴 +伸 +佃 +佈 +位 +低 +住 +佐 +何 +佛 +作 +使 +來 +供 +依 +侯 +侵 +侶 +便 +俗 +保 +俠 +信 +修 +俱 +俳 +倉 +個 +倍 +倒 +候 +借 +値 +倫 +倭 +假 +偈 +偉 +偏 +停 +偶 +傅 +傑 +傳 +傷 +傾 +像 +僞 +僥 +僧 +價 +儀 +儉 +儒 +優 +儼 +兀 +允 +元 +兆 +先 +光 +克 +兒 +入 +內 +全 +八 +公 +六 +共 +兵 +其 +具 +典 +兼 +再 +冠 +冥 +冶 +准 +凞 +凡 +凱 +出 +函 +刀 +分 +刊 +刑 +列 +初 +判 +別 +利 +到 +制 +券 +刺 +刻 +則 +前 +剛 +副 +創 +劃 +劑 +力 +功 +加 +劣 +助 +劫 +勇 +動 +務 +勝 +勢 +勳 +勸 +匈 +化 +北 +匠 +區 +十 +千 +午 +半 +卍 +卑 +卒 +卓 +南 +博 +卜 +占 +卦 +印 +危 +卵 +卷 +卽 +卿 +厄 +原 +厦 +去 +參 +又 +叉 +友 +反 +叔 +受 +口 +古 +句 +可 +台 +史 +右 +司 +各 +合 +吉 +同 +名 +后 +吏 +吐 +君 +吠 +吳 +呂 +告 +周 +味 +呵 +命 +和 +咳 +咸 +咽 +哀 +品 +哨 +哮 +哲 +唐 +唯 +唱 +商 +問 +啼 +善 +喆 +喉 +喜 +喩 +喪 +嘗 +器 +嚴 +囊 +四 +回 +因 +困 +固 +圈 +國 +圍 +園 +圓 +圖 +團 +土 +在 +地 +均 +坊 +坐 +坑 +坵 +型 +垢 +城 +域 +埴 +執 +培 +基 +堂 +堅 +堆 +堤 +堯 +報 +場 +塔 +塚 +塞 +塵 +境 +墜 +墟 +墨 +墳 +墾 +壁 +壇 +壓 +壤 +士 +壬 +壯 +壺 +壽 +夏 +夕 +外 +多 +夜 +夢 +大 +天 +太 +夫 +央 +失 +夷 +奄 +奇 +奉 +奎 +奏 +契 +奔 +奮 +女 +奴 +好 +如 +妄 +妊 +妖 +妙 +始 +姑 +姓 +姚 +姜 +威 +婆 +婚 +婦 +媒 +媚 +子 +孔 +字 +存 +孝 +孟 +季 +孤 +孫 +學 +孺 +宇 +守 +安 +宋 +宗 +官 +宙 +定 +客 +宣 +室 +宮 +害 +家 +容 +寂 +寃 +寄 +寅 +密 +寇 +富 +寒 +寓 +實 +審 +寫 +寬 +寶 +寸 +寺 +封 +將 +專 +尊 +對 +小 +少 +尙 +尹 +尼 +尿 +局 +居 +屈 +屋 +屍 +屎 +屛 +層 +屬 +山 +岐 +岡 +岩 +岳 +岸 +峙 +峰 +島 +峻 +峽 +崇 +崔 +崖 +崩 +嶋 +巖 +川 +州 +巢 +工 +左 +巧 +巨 +巫 +差 +己 +巷 +市 +布 +帝 +師 +帶 +常 +帽 +幕 +干 +平 +年 +幹 +幻 +幼 +幽 +庇 +序 +店 +府 +度 +座 +庫 +庭 +康 +廟 +廣 +廳 +延 +廷 +建 +廻 +弁 +式 +弑 +弓 +引 +弘 +弟 +弱 +張 +强 +弼 +彌 +彛 +形 +彬 +影 +役 +彼 +彿 +往 +征 +待 +律 +後 +徐 +徑 +得 +從 +循 +微 +德 +徹 +心 +必 +忌 +忍 +志 +忠 +思 +怡 +急 +性 +恐 +恒 +恨 +恩 +悅 +悖 +患 +悲 +情 +惑 +惟 +惠 +惡 +想 +惺 +愁 +意 +愚 +愛 +感 +愼 +慈 +態 +慕 +慣 +慧 +慾 +憂 +憤 +憺 +應 +懸 +戎 +成 +我 +戟 +戮 +戰 +戴 +戶 +房 +所 +手 +才 +打 +批 +承 +技 +抄 +把 +抗 +抱 +抽 +拇 +拓 +拘 +拙 +拜 +拾 +持 +指 +捌 +捨 +捿 +授 +掌 +排 +接 +推 +提 +揚 +揭 +援 +損 +搗 +摩 +播 +操 +擒 +擔 +擘 +據 +擧 +攘 +攝 +攬 +支 +改 +攻 +放 +政 +故 +敍 +敎 +救 +敗 +散 +敬 +整 +數 +文 +斗 +料 +斛 +斜 +斧 +斯 +新 +斷 +方 +於 +施 +旋 +族 +旗 +日 +旨 +早 +旱 +昌 +明 +易 +昔 +星 +春 +昧 +昭 +是 +時 +晉 +晋 +晩 +普 +景 +晴 +晶 +智 +暈 +暑 +暗 +暘 +曉 +曜 +曠 +曦 +曰 +曲 +書 +曹 +曼 +曾 +最 +會 +月 +有 +朋 +服 +望 +朝 +期 +木 +未 +末 +本 +朱 +朴 +李 +材 +村 +杖 +杜 +杞 +杭 +杯 +東 +松 +板 +林 +果 +枝 +枯 +枰 +枾 +柏 +柑 +柱 +栗 +校 +栢 +核 +根 +格 +桀 +桂 +案 +桎 +桑 +桓 +桔 +梁 +梏 +梓 +梗 +條 +梨 +梵 +棗 +棟 +森 +植 +椒 +楊 +楓 +楚 +業 +楮 +極 +榮 +槃 +槍 +樂 +樓 +樗 +樣 +樸 +樹 +樺 +樽 +橄 +橋 +橘 +機 +橡 +檀 +檎 +權 +欌 +欖 +次 +欲 +歌 +歐 
+止 +正 +此 +步 +武 +歲 +歸 +死 +殖 +段 +殷 +殺 +殿 +毅 +母 +毒 +比 +毛 +氏 +民 +氣 +水 +永 +求 +汎 +汗 +江 +池 +沅 +沒 +沖 +沙 +沛 +河 +油 +治 +沼 +沿 +泉 +泊 +法 +泗 +泡 +波 +注 +泰 +洋 +洙 +洛 +洞 +津 +洲 +活 +派 +流 +浅 +浦 +浮 +浴 +海 +涅 +涇 +消 +涌 +液 +淑 +淡 +淨 +淫 +深 +淳 +淵 +淸 +渠 +渡 +游 +渾 +湖 +湯 +源 +溪 +溫 +溶 +滄 +滅 +滋 +滯 +滿 +漁 +漆 +漢 +漫 +漸 +潑 +潤 +潭 +澄 +澎 +澤 +澳 +澹 +濁 +濕 +濟 +濤 +濯 +瀋 +瀝 +灣 +火 +灰 +灸 +災 +炎 +炭 +点 +烈 +烏 +烙 +焚 +無 +焦 +然 +煌 +煎 +照 +煬 +煮 +熟 +熱 +燁 +燈 +燔 +燕 +燥 +燧 +燮 +爲 +爵 +父 +片 +版 +牌 +牛 +牝 +牟 +牡 +物 +特 +犧 +犬 +狀 +狗 +猥 +猩 +猪 +獨 +獵 +獸 +獻 +玄 +玉 +王 +玲 +珍 +珠 +珪 +班 +現 +球 +理 +琴 +瑞 +瑟 +瑪 +璃 +璋 +璽 +瓜 +瓦 +甑 +甘 +生 +産 +用 +甫 +田 +由 +甲 +申 +男 +界 +畏 +留 +畜 +畢 +略 +番 +異 +畵 +當 +畸 +疏 +疑 +疫 +疹 +疼 +病 +症 +痔 +痛 +痺 +瘀 +瘍 +瘡 +療 +癌 +癖 +登 +發 +白 +百 +的 +皆 +皇 +皮 +盂 +盆 +益 +盛 +盜 +盟 +盡 +盤 +盧 +目 +直 +相 +省 +看 +眞 +眼 +睡 +督 +瞋 +矢 +矣 +知 +短 +石 +破 +碍 +碑 +磁 +磨 +磬 +示 +社 +祇 +祖 +祝 +神 +祥 +祭 +祺 +禁 +禅 +禍 +福 +禦 +禪 +禮 +禹 +禽 +禾 +秀 +私 +秉 +秋 +科 +秘 +秤 +秦 +秩 +移 +稀 +稗 +種 +稱 +稷 +稼 +稽 +穀 +穆 +積 +空 +窮 +竅 +立 +章 +童 +竭 +端 +竹 +笑 +符 +第 +筆 +等 +筍 +答 +策 +箋 +箕 +管 +箱 +節 +篇 +簡 +米 +粉 +粘 +粥 +精 +糖 +糞 +系 +紀 +紂 +約 +紅 +紋 +純 +紙 +級 +素 +索 +紫 +紬 +累 +細 +紳 +終 +組 +結 +絡 +統 +絲 +絶 +絹 +經 +綠 +維 +綱 +網 +綸 +綽 +緖 +線 +緣 +緯 +縣 +縱 +總 +織 +繡 +繩 +繪 +繭 +纂 +續 +罕 +置 +罰 +羅 +羊 +美 +群 +義 +羽 +翁 +習 +翟 +老 +考 +者 +而 +耐 +耕 +耳 +聃 +聖 +聞 +聰 +聲 +職 +肇 +肉 +肖 +肝 +股 +肥 +育 +肺 +胃 +胎 +胚 +胞 +胡 +胥 +能 +脂 +脈 +脚 +脛 +脣 +脩 +脫 +脯 +脾 +腋 +腎 +腫 +腸 +腹 +膜 +膠 +膨 +膽 +臆 +臟 +臣 +臥 +臨 +自 +至 +致 +臺 +臼 +臾 +與 +興 +舊 +舌 +舍 +舒 +舜 +舟 +般 +船 +艦 +良 +色 +芋 +花 +芳 +芽 +苑 +苔 +苕 +苛 +苞 +若 +苦 +英 +茂 +茵 +茶 +茹 +荀 +荇 +草 +荒 +荷 +莊 +莫 +菊 +菌 +菜 +菩 +菫 +華 +菴 +菽 +萊 +萍 +萬 +落 +葉 +著 +葛 +董 +葬 +蒙 +蒜 +蒲 +蒸 +蒿 +蓮 +蔓 +蔘 +蔡 +蔬 +蕃 +蕉 +蕓 +薄 +薑 +薛 +薩 +薪 +薺 +藏 +藝 +藤 +藥 +藩 +藻 +蘆 +蘇 +蘊 +蘚 +蘭 +虎 +處 +虛 +虞 +虹 +蜀 +蜂 +蜜 +蝕 +蝶 +融 +蟬 +蟲 +蠶 +蠻 +血 +衆 +行 +術 +衛 +衡 +衣 +表 +袁 +裔 +裕 +裙 +補 +製 +複 +襄 +西 +要 +見 +視 +親 +覺 +觀 +角 +解 +言 +訂 +訊 +訓 +託 +記 +訣 +設 +診 +註 +評 +詩 +話 +詵 +誅 +誌 +認 +誕 +語 +誠 +誤 +誥 +誦 +說 +調 +談 +諍 +論 +諡 +諫 +諭 +諸 +謙 +講 +謝 +謠 +證 +識 +譚 +譜 +譯 +議 +護 +讀 +變 +谷 +豆 +豊 +豚 +象 +豪 +豫 +貝 +貞 +財 +貧 +貨 +貪 +貫 +貴 +貸 +費 +資 +賊 +賓 +賞 +賢 +賣 +賦 +質 +贍 +赤 +赫 +走 +起 +超 +越 +趙 +趣 +趨 +足 +趾 +跋 +跡 +路 +踏 +蹟 +身 +躬 +車 +軍 +軒 +軟 +載 +輓 +輕 +輪 +輯 +輸 +輻 +輿 +轅 +轉 +辨 +辭 +辯 +辰 +農 +近 +迦 +述 +追 +逆 +透 +逐 +通 +逝 +造 +逢 +連 +進 +逵 +遂 +遊 +運 +遍 +過 +道 +達 +遠 +遡 +適 +遷 +選 +遺 +遽 +還 +邊 +邑 +那 +邪 +郞 +郡 +部 +都 +鄒 +鄕 +鄭 +鄲 +配 +酒 +酸 +醉 +醫 +醯 +釋 +里 +重 +野 +量 +釐 +金 +針 +鈍 +鈴 +鉞 +銀 +銅 +銘 +鋼 +錄 +錢 +錦 +鎭 +鏡 +鐘 +鐵 +鑑 +鑛 +長 +門 +閃 +開 +間 +閔 +閣 +閥 +閭 +閻 +闕 +關 +阪 +防 +阿 +陀 +降 +限 +陝 +院 +陰 +陳 +陵 +陶 +陸 +陽 +隆 +隊 +隋 +階 +際 +障 +隣 +隨 +隱 +隷 +雀 +雄 +雅 +集 +雇 +雌 +雖 +雙 +雜 +離 +難 +雨 +雪 +雲 +電 +霜 +露 +靈 +靑 +靖 +靜 +非 +面 +革 +靴 +鞏 +韓 +音 +韶 +韻 +順 +須 +頊 +頌 +領 +頭 +顔 +願 +顚 +類 +顯 +風 +飛 +食 +飢 +飮 +飯 +飾 +養 +餓 +餘 +首 +香 +馨 +馬 +駒 +騫 +騷 +驕 +骨 +骸 +髓 +體 +高 +髥 +髮 +鬪 +鬱 +鬼 +魏 +魔 +魚 +魯 +鮮 +鰍 +鰐 +鳥 +鳧 +鳳 +鴨 +鵲 +鶴 +鷄 +鷹 +鹽 +鹿 +麗 +麥 +麻 +黃 +黑 +默 +點 +黨 +鼎 +齊 +齋 +齒 +龍 +龜 +가 +각 +간 +갇 +갈 +갉 +감 +갑 +값 +갓 +갔 +강 +갖 +갗 +같 +갚 +갛 +개 +객 +갠 +갤 +갬 +갭 +갯 +갰 +갱 +갸 +걀 +걔 +걘 +거 +걱 +건 +걷 +걸 +검 +겁 +것 +겄 +겅 +겆 +겉 +겊 +겋 +게 +겐 +겔 +겟 +겠 +겡 +겨 +격 +겪 +견 +결 +겸 +겹 +겻 +겼 +경 +곁 +계 +곕 +곗 +고 +곡 +곤 +곧 +골 +곪 +곬 +곯 +곰 +곱 +곳 +공 +곶 +과 +곽 +관 +괄 +괌 +광 +괘 +괜 +괭 +괴 +괸 +굉 +교 +구 +국 +군 +굳 +굴 +굵 +굶 +굼 +굽 +굿 +궁 +궂 +궈 +권 +궐 +궜 +궝 +궤 +귀 +귄 +귈 +귓 +규 +균 +귤 +그 +극 +근 +글 +긁 +금 +급 +긋 +긍 +기 +긴 +길 +김 +깁 +깃 +깅 +깊 +까 +깍 +깎 +깐 +깔 +깜 +깝 +깟 +깡 +깥 +깨 +깬 +깰 +깻 +깼 +깽 +꺄 +꺼 +꺽 +꺾 +껀 +껄 +껌 +껍 +껏 +껐 +껑 +께 +껴 +꼈 +꼍 +꼐 +꼬 +꼭 +꼴 +꼼 +꼽 +꼿 +꽁 +꽂 +꽃 +꽉 +꽝 +꽤 +꽥 +꾀 +꾜 +꾸 +꾹 +꾼 +꿀 +꿇 +꿈 +꿉 +꿋 +꿍 +꿎 +꿔 +꿨 +꿩 +꿰 +꿴 +뀄 +뀌 +뀐 +뀔 +뀜 +뀝 +끄 +끈 +끊 +끌 +끓 +끔 +끕 +끗 +끙 +끝 +끼 +끽 +낀 +낄 +낌 +낍 +낏 +낑 +나 +낙 +낚 +난 +낟 +날 +낡 +남 +납 +낫 +났 +낭 +낮 +낯 +낱 +낳 +내 +낵 +낸 +낼 +냄 +냅 +냇 +냈 +냉 +냐 +냔 +냘 +냥 +너 +넉 +넋 +넌 +널 +넓 +넘 +넙 +넛 +넜 +넝 +넣 +네 +넥 +넨 +넬 +넴 +넵 +넷 +넸 +넹 +녀 +녁 +년 +념 +녔 +녕 +녘 +녜 +노 +녹 +논 +놀 +놈 +놋 +농 +높 +놓 +놔 +놨 +뇌 +뇨 +뇩 +뇽 +누 +눅 +눈 +눌 +눔 +눕 +눗 +눠 +눴 +뉘 +뉜 +뉩 +뉴 +늄 +늅 +늉 
+느 +늑 +는 +늘 +늙 +늠 +늡 +능 +늦 +늪 +늬 +니 +닉 +닌 +닐 +님 +닙 +닛 +닝 +닢 +다 +닥 +닦 +단 +닫 +달 +닭 +닮 +닯 +닳 +담 +답 +닷 +당 +닻 +닿 +대 +댁 +댄 +댈 +댐 +댑 +댓 +댔 +댕 +댜 +더 +덕 +덖 +던 +덜 +덟 +덤 +덥 +덧 +덩 +덫 +덮 +데 +덱 +덴 +델 +뎀 +뎃 +뎅 +뎌 +뎠 +뎨 +도 +독 +돈 +돋 +돌 +돔 +돕 +돗 +동 +돛 +돝 +돼 +됐 +되 +된 +될 +됨 +됩 +됴 +두 +둑 +둔 +둘 +둠 +둡 +둣 +둥 +둬 +뒀 +뒤 +뒬 +뒷 +뒹 +듀 +듈 +듐 +드 +득 +든 +듣 +들 +듦 +듬 +듭 +듯 +등 +듸 +디 +딕 +딘 +딛 +딜 +딤 +딥 +딧 +딨 +딩 +딪 +따 +딱 +딴 +딸 +땀 +땄 +땅 +때 +땐 +땔 +땜 +땝 +땠 +땡 +떠 +떡 +떤 +떨 +떫 +떰 +떱 +떳 +떴 +떵 +떻 +떼 +떽 +뗀 +뗄 +뗍 +뗏 +뗐 +뗑 +또 +똑 +똘 +똥 +뙤 +뚜 +뚝 +뚤 +뚫 +뚱 +뛰 +뛴 +뛸 +뜀 +뜁 +뜨 +뜩 +뜬 +뜯 +뜰 +뜸 +뜻 +띄 +띈 +띌 +띔 +띕 +띠 +띤 +띨 +띱 +띵 +라 +락 +란 +랄 +람 +랍 +랏 +랐 +랑 +랒 +랗 +래 +랙 +랜 +랠 +램 +랩 +랫 +랬 +랭 +랴 +략 +량 +러 +럭 +런 +럴 +럼 +럽 +럿 +렀 +렁 +렇 +레 +렉 +렌 +렐 +렘 +렙 +렛 +렝 +려 +력 +련 +렬 +렴 +렵 +렷 +렸 +령 +례 +로 +록 +론 +롤 +롬 +롭 +롯 +롱 +롸 +롹 +뢰 +뢴 +뢸 +룃 +료 +룐 +룡 +루 +룩 +룬 +룰 +룸 +룹 +룻 +룽 +뤄 +뤘 +뤼 +류 +륙 +륜 +률 +륨 +륭 +르 +륵 +른 +를 +름 +릅 +릇 +릉 +릎 +리 +릭 +린 +릴 +림 +립 +릿 +링 +마 +막 +만 +많 +맏 +말 +맑 +맘 +맙 +맛 +망 +맞 +맡 +맣 +매 +맥 +맨 +맬 +맴 +맵 +맷 +맸 +맹 +맺 +먀 +먁 +머 +먹 +먼 +멀 +멈 +멋 +멍 +멎 +메 +멕 +멘 +멜 +멤 +멥 +멧 +멩 +며 +멱 +면 +멸 +몄 +명 +몇 +모 +목 +몫 +몬 +몰 +몸 +몹 +못 +몽 +뫼 +묘 +무 +묵 +묶 +문 +묻 +물 +묽 +뭄 +뭅 +뭇 +뭉 +뭍 +뭏 +뭐 +뭔 +뭘 +뭡 +뭣 +뮈 +뮌 +뮐 +뮤 +뮬 +므 +믈 +믐 +미 +믹 +민 +믿 +밀 +밈 +밉 +밋 +밌 +밍 +및 +밑 +바 +박 +밖 +반 +받 +발 +밝 +밟 +밤 +밥 +밧 +방 +밭 +배 +백 +밴 +밸 +뱀 +뱁 +뱃 +뱄 +뱅 +뱉 +뱍 +뱐 +버 +벅 +번 +벌 +범 +법 +벗 +벙 +벚 +베 +벡 +벤 +벨 +벰 +벱 +벳 +벵 +벼 +벽 +변 +별 +볍 +볏 +볐 +병 +볕 +보 +복 +볶 +본 +볼 +봄 +봅 +봇 +봉 +봐 +봤 +뵈 +뵐 +뵙 +부 +북 +분 +붇 +불 +붉 +붐 +붓 +붕 +붙 +뷔 +뷰 +뷴 +뷸 +브 +븐 +블 +비 +빅 +빈 +빌 +빔 +빕 +빗 +빙 +빚 +빛 +빠 +빡 +빤 +빨 +빳 +빴 +빵 +빻 +빼 +빽 +뺀 +뺄 +뺌 +뺏 +뺐 +뺑 +뺨 +뻐 +뻑 +뻔 +뻗 +뻘 +뻣 +뻤 +뻥 +뻬 +뼈 +뼉 +뼘 +뽀 +뽈 +뽐 +뽑 +뽕 +뾰 +뿌 +뿍 +뿐 +뿔 +뿜 +쁘 +쁜 +쁠 +쁨 +삐 +삔 +삘 +사 +삭 +삯 +산 +살 +삵 +삶 +삼 +삽 +삿 +샀 +상 +샅 +새 +색 +샌 +샐 +샘 +샙 +샛 +샜 +생 +샤 +샨 +샬 +샴 +샵 +샷 +샹 +서 +석 +섞 +선 +섣 +설 +섬 +섭 +섯 +섰 +성 +섶 +세 +섹 +센 +셀 +셈 +셉 +셋 +셌 +셍 +셔 +션 +셜 +셨 +셰 +셴 +셸 +소 +속 +손 +솔 +솜 +솝 +솟 +송 +솥 +쇄 +쇠 +쇤 +쇳 +쇼 +숀 +숄 +숍 +수 +숙 +순 +숟 +술 +숨 +숩 +숫 +숭 +숯 +숱 +숲 +숴 +쉐 +쉘 +쉬 +쉭 +쉰 +쉴 +쉼 +쉽 +슈 +슐 +슘 +슛 +슝 +스 +슥 +슨 +슬 +슭 +슴 +습 +슷 +승 +시 +식 +신 +싣 +실 +싫 +심 +십 +싯 +싱 +싶 +싸 +싹 +싼 +쌀 +쌈 +쌉 +쌌 +쌍 +쌓 +쌔 +쌘 +쌩 +써 +썩 +썬 +썰 +썸 +썹 +썼 +썽 +쎄 +쎈 +쏘 +쏙 +쏜 +쏟 +쏠 +쏭 +쏴 +쐈 +쐐 +쐬 +쑤 +쑥 +쑨 +쒀 +쒔 +쓰 +쓱 +쓴 +쓸 +씀 +씁 +씌 +씨 +씩 +씬 +씰 +씸 +씹 +씻 +씽 +아 +악 +안 +앉 +않 +알 +앎 +앓 +암 +압 +앗 +았 +앙 +앞 +애 +액 +앤 +앨 +앰 +앱 +앳 +앴 +앵 +야 +약 +얀 +얄 +얇 +얌 +얍 +얏 +양 +얕 +얗 +얘 +얜 +어 +억 +언 +얹 +얻 +얼 +얽 +엄 +업 +없 +엇 +었 +엉 +엊 +엌 +엎 +에 +엑 +엔 +엘 +엠 +엡 +엣 +엥 +여 +역 +엮 +연 +열 +엷 +염 +엽 +엾 +엿 +였 +영 +옅 +옆 +옇 +예 +옌 +옐 +옙 +옛 +오 +옥 +온 +올 +옭 +옮 +옳 +옴 +옵 +옷 +옹 +옻 +와 +왁 +완 +왈 +왑 +왓 +왔 +왕 +왜 +왠 +왱 +외 +왼 +요 +욕 +욘 +욜 +욤 +용 +우 +욱 +운 +울 +움 +웁 +웃 +웅 +워 +웍 +원 +월 +웜 +웠 +웡 +웨 +웬 +웰 +웸 +웹 +위 +윅 +윈 +윌 +윔 +윗 +윙 +유 +육 +윤 +율 +윱 +윳 +융 +으 +윽 +은 +을 +읊 +음 +읍 +응 +의 +읜 +읠 +이 +익 +인 +일 +읽 +잃 +임 +입 +잇 +있 +잉 +잊 +잎 +자 +작 +잔 +잖 +잘 +잠 +잡 +잣 +잤 +장 +잦 +재 +잭 +잰 +잴 +잽 +잿 +쟀 +쟁 +쟈 +쟉 +쟤 +저 +적 +전 +절 +젊 +점 +접 +젓 +정 +젖 +제 +젝 +젠 +젤 +젬 +젭 +젯 +져 +젼 +졀 +졌 +졍 +조 +족 +존 +졸 +좀 +좁 +종 +좇 +좋 +좌 +좍 +좽 +죄 +죠 +죤 +주 +죽 +준 +줄 +줌 +줍 +줏 +중 +줘 +줬 +쥐 +쥔 +쥘 +쥬 +쥴 +즈 +즉 +즌 +즐 +즘 +즙 +증 +지 +직 +진 +짇 +질 +짊 +짐 +집 +짓 +징 +짖 +짙 +짚 +짜 +짝 +짠 +짢 +짤 +짧 +짬 +짭 +짰 +짱 +째 +짹 +짼 +쨀 +쨉 +쨋 +쨌 +쨍 +쩄 +쩌 +쩍 +쩐 +쩔 +쩜 +쩝 +쩡 +쩨 +쪄 +쪘 +쪼 +쪽 +쪾 +쫀 +쫄 +쫑 +쫓 +쫙 +쬐 +쭈 +쭉 +쭐 +쭙 +쯔 +쯤 +쯧 +찌 +찍 +찐 +찔 +찜 +찝 +찡 +찢 +찧 +차 +착 +찬 +찮 +찰 +참 +찹 +찻 +찼 +창 +찾 +채 +책 +챈 +챌 +챔 +챕 +챗 +챘 +챙 +챠 +챤 +처 +척 +천 +철 +첨 +첩 +첫 +청 +체 +첵 +첸 +첼 +쳄 +쳇 +쳉 +쳐 +쳔 +쳤 +초 +촉 +촌 +촘 +촛 +총 +촨 +촬 +최 +쵸 +추 +축 +춘 +출 +춤 +춥 +춧 +충 +춰 +췄 +췌 +취 +췬 +츄 +츠 +측 +츨 +츰 +층 +치 +칙 +친 +칠 +칡 +침 +칩 +칫 +칭 +카 +칵 +칸 +칼 +캄 +캅 +캇 +캉 +캐 +캔 +캘 +캠 +캡 +캣 +캤 +캥 +캬 +커 +컥 +컨 +컫 +컬 +컴 +컵 +컷 +컸 +컹 +케 +켄 +켈 +켐 +켓 +켕 +켜 +켠 +켤 +켭 +켯 +켰 +코 +콕 +콘 +콜 +콤 +콥 +콧 +콩 +콰 +콱 +콴 +콸 +쾅 +쾌 +쾡 +쾨 +쾰 +쿄 +쿠 +쿡 +쿤 +쿨 +쿰 +쿵 +쿼 +퀀 +퀄 +퀘 +퀭 +퀴 +퀵 +퀸 +퀼 +큐 +큘 +크 +큰 +클 +큼 +큽 +키 +킥 +킨 +킬 +킴 +킵 +킷 +킹 +타 
+탁 +탄 +탈 +탉 +탐 +탑 +탓 +탔 +탕 +태 +택 +탠 +탤 +탬 +탭 +탯 +탰 +탱 +터 +턱 +턴 +털 +텀 +텁 +텃 +텄 +텅 +테 +텍 +텐 +텔 +템 +텝 +텡 +텨 +톈 +토 +톡 +톤 +톨 +톰 +톱 +톳 +통 +퇴 +툇 +투 +툭 +툰 +툴 +툼 +퉁 +퉈 +퉜 +튀 +튄 +튈 +튕 +튜 +튠 +튤 +튬 +트 +특 +튼 +튿 +틀 +틈 +틉 +틋 +틔 +티 +틱 +틴 +틸 +팀 +팁 +팅 +파 +팍 +팎 +판 +팔 +팜 +팝 +팟 +팠 +팡 +팥 +패 +팩 +팬 +팰 +팸 +팻 +팼 +팽 +퍼 +퍽 +펀 +펄 +펌 +펍 +펐 +펑 +페 +펙 +펜 +펠 +펨 +펩 +펫 +펭 +펴 +편 +펼 +폄 +폈 +평 +폐 +포 +폭 +폰 +폴 +폼 +폿 +퐁 +표 +푭 +푸 +푹 +푼 +풀 +품 +풋 +풍 +퓨 +퓬 +퓰 +퓸 +프 +픈 +플 +픔 +픕 +피 +픽 +핀 +필 +핌 +핍 +핏 +핑 +하 +학 +한 +할 +핥 +함 +합 +핫 +항 +해 +핵 +핸 +핼 +햄 +햅 +햇 +했 +행 +햐 +향 +헀 +허 +헉 +헌 +헐 +험 +헙 +헛 +헝 +헤 +헥 +헨 +헬 +헴 +헵 +헷 +헹 +혀 +혁 +현 +혈 +혐 +협 +혓 +혔 +형 +혜 +호 +혹 +혼 +홀 +홈 +홉 +홋 +홍 +홑 +화 +확 +환 +활 +홧 +황 +홰 +홱 +횃 +회 +획 +횝 +횟 +횡 +효 +후 +훅 +훈 +훌 +훑 +훔 +훗 +훤 +훨 +훼 +휄 +휑 +휘 +휙 +휜 +휠 +휩 +휭 +휴 +휼 +흄 +흉 +흐 +흑 +흔 +흘 +흙 +흠 +흡 +흣 +흥 +흩 +희 +흰 +흽 +히 +힉 +힌 +힐 +힘 +힙 +힝 +車 +滑 +金 +奈 +羅 +洛 +卵 +欄 +蘭 +郎 +來 +盧 +老 +魯 +綠 +鹿 +論 +雷 +樓 +縷 +凌 +樂 +不 +參 +葉 +沈 +若 +兩 +凉 +梁 +呂 +女 +廬 +麗 +黎 +曆 +歷 +戀 +蓮 +連 +列 +烈 +裂 +念 +獵 +靈 +領 +例 +禮 +醴 +惡 +尿 +料 +遼 +龍 +暈 +柳 +流 +類 +六 +陸 +倫 +律 +栗 +利 +李 +梨 +理 +離 +燐 +林 +臨 +立 +茶 +切 +宅 + diff --git a/modules/pytorchocr/utils/dict/latex_symbol_dict.txt b/modules/pytorchocr/utils/dict/latex_symbol_dict.txt new file mode 100644 index 0000000..d17f2c2 --- /dev/null +++ b/modules/pytorchocr/utils/dict/latex_symbol_dict.txt @@ -0,0 +1,111 @@ +eos +sos +! +' +( +) ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +< += +> +A +B +C +E +F +G +H +I +L +M +N +P +R +S +T +V +X +Y +[ +\Delta +\alpha +\beta +\cdot +\cdots +\cos +\div +\exists +\forall +\frac +\gamma +\geq +\in +\infty +\int +\lambda +\ldots +\leq +\lim +\log +\mu +\neq +\phi +\pi +\pm +\prime +\rightarrow +\sigma +\sin +\sqrt +\sum +\tan +\theta +\times +] +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +\{ +| +\} +{ +} +^ +_ \ No newline at end of file diff --git a/modules/pytorchocr/utils/dict/latin_dict.txt b/modules/pytorchocr/utils/dict/latin_dict.txt new file mode 100644 index 0000000..e166bf3 --- /dev/null +++ b/modules/pytorchocr/utils/dict/latin_dict.txt @@ -0,0 +1,185 @@ + +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +] +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +} +¡ +£ +§ +ª +« +­ +° +² +³ +´ +µ +· +º +» +¿ +À +Á + +Ä +Å +Ç +È +É +Ê +Ë +Ì +Í +Î +Ï +Ò +Ó +Ô +Õ +Ö +Ú +Ü +Ý +ß +à +á +â +ã +ä +å +æ +ç +è +é +ê +ë +ì +í +î +ï +ñ +ò +ó +ô +õ +ö +ø +ù +ú +û +ü +ý +ą +Ć +ć +Č +č +Đ +đ +ę +ı +Ł +ł +ō +Œ +œ +Š +š +Ÿ +Ž +ž +ʒ +β +δ +ε +з +Ṡ +‘ +€ +™ diff --git a/modules/pytorchocr/utils/dict/mr_dict.txt b/modules/pytorchocr/utils/dict/mr_dict.txt new file mode 100644 index 0000000..283b150 --- /dev/null +++ b/modules/pytorchocr/utils/dict/mr_dict.txt @@ -0,0 +1,153 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +ँ +ं +ः +अ +आ +इ +ई +उ +ऊ +ए +ऐ +ऑ +ओ +औ +क +ख +ग +घ +च +छ +ज +झ +ञ +ट +ठ +ड +ढ +ण +त +थ +द +ध +न +प +फ +ब +भ +म +य +र +ऱ +ल +ळ +व +श +ष +स +ह +़ +ा +ि +ी +ु +ू +ृ +ॅ +े +ै +ॉ +ो +ौ +् +० +१ +२ +३ +४ +५ +६ +७ +८ +९ diff --git a/modules/pytorchocr/utils/dict/ne_dict.txt b/modules/pytorchocr/utils/dict/ne_dict.txt new file mode 100644 index 0000000..5a7df95 --- /dev/null +++ b/modules/pytorchocr/utils/dict/ne_dict.txt @@ -0,0 +1,153 @@ + +! +# +$ +% +& +' +( ++ +, +- +. +/ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +? 
+@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +_ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +É +é +ः +अ +आ +इ +ई +उ +ऊ +ऋ +ए +ऐ +ओ +औ +क +ख +ग +घ +ङ +च +छ +ज +झ +ञ +ट +ठ +ड +ढ +ण +त +थ +द +ध +न +ऩ +प +फ +ब +भ +म +य +र +ऱ +ल +व +श +ष +स +ह +़ +ा +ि +ी +ु +ू +ृ +े +ै +ो +ौ +् +॒ +ॠ +। +० +१ +२ +३ +४ +५ +६ +७ +८ +९ diff --git a/modules/pytorchocr/utils/dict/oc_dict.txt b/modules/pytorchocr/utils/dict/oc_dict.txt new file mode 100644 index 0000000..e88af8b --- /dev/null +++ b/modules/pytorchocr/utils/dict/oc_dict.txt @@ -0,0 +1,96 @@ +o +c +_ +i +m +g +/ +2 +0 +I +L +S +V +R +C +1 +v +a +l +4 +3 +. +j +p +r +e +è +t +9 +7 +5 +8 +n +' +b +s +6 +q +u +á +d +ò +à +h +z +f +ï +í +A +ç +x +ó +é +P +O +Ò +ü +k +À +F +- +ú +­ +æ +Á +D +E +w +K +T +N +y +U +Z +G +B +J +H +M +W +Y +X +Q +% +$ +, +@ +& +! +: +( +# +? ++ +É + diff --git a/modules/pytorchocr/utils/dict/pt_dict.txt b/modules/pytorchocr/utils/dict/pt_dict.txt new file mode 100644 index 0000000..9500fae --- /dev/null +++ b/modules/pytorchocr/utils/dict/pt_dict.txt @@ -0,0 +1,130 @@ +p +u +_ +i +m +g +/ +8 +I +L +S +V +R +C +2 +0 +1 +v +a +l +6 +7 +4 +5 +. +j + +q +e +s +t +ã +o +x +9 +c +n +r +z +ç +õ +3 +A +U +d +º +ô +­ +, +E +; +ó +á +b +D +? +ú +ê +- +h +P +f +à +N +í +O +M +G +É +é +â +F +: +T +Á +" +Q +) +W +J +B +H +( +ö +% +Ö +« +w +K +y +! +k +] +' +Z ++ +Ç +Õ +Y +À +X +µ +» +ª +Í +ü +ä +´ +è +ñ +ß +ï +Ú +ë +Ô +Ï +Ó +[ +Ì +< + +ò +§ +³ +ø +å +# +$ +& +@ diff --git a/modules/pytorchocr/utils/dict/pu_dict.txt b/modules/pytorchocr/utils/dict/pu_dict.txt new file mode 100644 index 0000000..9500fae --- /dev/null +++ b/modules/pytorchocr/utils/dict/pu_dict.txt @@ -0,0 +1,130 @@ +p +u +_ +i +m +g +/ +8 +I +L +S +V +R +C +2 +0 +1 +v +a +l +6 +7 +4 +5 +. +j + +q +e +s +t +ã +o +x +9 +c +n +r +z +ç +õ +3 +A +U +d +º +ô +­ +, +E +; +ó +á +b +D +? +ú +ê +- +h +P +f +à +N +í +O +M +G +É +é +â +F +: +T +Á +" +Q +) +W +J +B +H +( +ö +% +Ö +« +w +K +y +! +k +] +' +Z ++ +Ç +Õ +Y +À +X +µ +» +ª +Í +ü +ä +´ +è +ñ +ß +ï +Ú +ë +Ô +Ï +Ó +[ +Ì +< + +ò +§ +³ +ø +å +# +$ +& +@ diff --git a/modules/pytorchocr/utils/dict/rs_cyrillic_dict.txt b/modules/pytorchocr/utils/dict/rs_cyrillic_dict.txt new file mode 100644 index 0000000..95dd463 --- /dev/null +++ b/modules/pytorchocr/utils/dict/rs_cyrillic_dict.txt @@ -0,0 +1,134 @@ +r +s +c +_ +i +m +g +/ +5 +I +L +S +V +R +C +2 +0 +1 +v +a +l +9 +7 +8 +. +j +p +м +а +с +и +р +ћ +е +ш +3 +4 +о +г +н +з +в +л +6 +т +ж +у +к +п +њ +д +ч +С +ј +ф +ц +љ +х +О +И +А +б +Ш +К +ђ +џ +М +В +З +Д +Р +У +Н +Т +Б +? +П +Х +Ј +Ц +Г +Љ +Л +Ф +e +n +w +E +F +A +N +f +o +b +M +G +t +y +W +k +P +u +H +B +T +z +h +O +Y +d +U +K +D +x +X +J +Z +Q +q +' +- +@ +é +# +! +, +% +$ +: +& ++ +( +É + diff --git a/modules/pytorchocr/utils/dict/rs_dict.txt b/modules/pytorchocr/utils/dict/rs_dict.txt new file mode 100644 index 0000000..d1ce46d --- /dev/null +++ b/modules/pytorchocr/utils/dict/rs_dict.txt @@ -0,0 +1,91 @@ +r +s +_ +i +m +g +/ +1 +I +L +S +V +R +C +2 +0 +v +a +l +7 +5 +8 +6 +. +j +p + +t +d +9 +3 +e +š +4 +k +u +ć +c +n +đ +o +z +č +b +ž +f +Z +T +h +M +F +O +Š +B +H +A +E +Đ +Ž +D +P +G +Č +K +U +N +J +Ć +w +y +W +x +Y +X +q +Q +# +& +$ +, +- +% +' +@ +! +: +? 
+( +É +é ++ diff --git a/modules/pytorchocr/utils/dict/rs_latin_dict.txt b/modules/pytorchocr/utils/dict/rs_latin_dict.txt new file mode 100644 index 0000000..d1ce46d --- /dev/null +++ b/modules/pytorchocr/utils/dict/rs_latin_dict.txt @@ -0,0 +1,91 @@ +r +s +_ +i +m +g +/ +1 +I +L +S +V +R +C +2 +0 +v +a +l +7 +5 +8 +6 +. +j +p + +t +d +9 +3 +e +š +4 +k +u +ć +c +n +đ +o +z +č +b +ž +f +Z +T +h +M +F +O +Š +B +H +A +E +Đ +Ž +D +P +G +Č +K +U +N +J +Ć +w +y +W +x +Y +X +q +Q +# +& +$ +, +- +% +' +@ +! +: +? +( +É +é ++ diff --git a/modules/pytorchocr/utils/dict/rsc_dict.txt b/modules/pytorchocr/utils/dict/rsc_dict.txt new file mode 100644 index 0000000..95dd463 --- /dev/null +++ b/modules/pytorchocr/utils/dict/rsc_dict.txt @@ -0,0 +1,134 @@ +r +s +c +_ +i +m +g +/ +5 +I +L +S +V +R +C +2 +0 +1 +v +a +l +9 +7 +8 +. +j +p +м +а +с +и +р +ћ +е +ш +3 +4 +о +г +н +з +в +л +6 +т +ж +у +к +п +њ +д +ч +С +ј +ф +ц +љ +х +О +И +А +б +Ш +К +ђ +џ +М +В +З +Д +Р +У +Н +Т +Б +? +П +Х +Ј +Ц +Г +Љ +Л +Ф +e +n +w +E +F +A +N +f +o +b +M +G +t +y +W +k +P +u +H +B +T +z +h +O +Y +d +U +K +D +x +X +J +Z +Q +q +' +- +@ +é +# +! +, +% +$ +: +& ++ +( +É + diff --git a/modules/pytorchocr/utils/dict/ru_dict.txt b/modules/pytorchocr/utils/dict/ru_dict.txt new file mode 100644 index 0000000..3b0cf3a --- /dev/null +++ b/modules/pytorchocr/utils/dict/ru_dict.txt @@ -0,0 +1,125 @@ +к +в +а +з +и +у +р +о +н +я +х +п +л +ы +г +е +т +м +д +ж +ш +ь +с +ё +б +й +ч +ю +ц +щ +М +э +ф +А +ъ +С +Ф +Ю +В +К +Т +Н +О +Э +У +И +Г +Л +Р +Д +Б +Ш +П +З +Х +Е +Ж +Я +Ц +Ч +Й +Щ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z + diff --git a/modules/pytorchocr/utils/dict/ta_dict.txt b/modules/pytorchocr/utils/dict/ta_dict.txt new file mode 100644 index 0000000..d1bae50 --- /dev/null +++ b/modules/pytorchocr/utils/dict/ta_dict.txt @@ -0,0 +1,128 @@ +t +a +_ +i +m +g +/ +3 +I +L +S +V +R +C +2 +0 +1 +v +l +9 +7 +8 +. +j +p +ப +ூ +த +ம +ி +வ +ர +் +ந +ோ +ன +6 +ஆ +ற +ல +5 +ள +ா +ொ +ழ +ு +4 +ெ +ண +க +ட +ை +ே +ச +ய +ஒ +இ +அ +ங +உ +ீ +ஞ +எ +ஓ +ஃ +ஜ +ஷ +ஸ +ஏ +ஊ +ஹ +ஈ +ஐ +ௌ +ஔ +s +c +e +n +w +F +T +O +P +K +A +N +G +Y +E +M +H +U +B +o +b +D +d +r +W +u +y +f +X +k +q +h +J +z +Z +Q +x +- +' +$ +, +% +@ +é +! +# ++ +É +& +: +( +? + diff --git a/modules/pytorchocr/utils/dict/table_dict.txt b/modules/pytorchocr/utils/dict/table_dict.txt new file mode 100644 index 0000000..2ef028c --- /dev/null +++ b/modules/pytorchocr/utils/dict/table_dict.txt @@ -0,0 +1,277 @@ +← + +☆ +─ +α + + +⋅ +$ +ω +ψ +χ +( +υ +≥ +σ +, +ρ +ε +0 +■ +4 +8 +✗ +b +< +✓ +Ψ +Ω +€ +D +3 +Π +H +║ + +L +Φ +Χ +θ +P +κ +λ +μ +T +ξ +X +β +γ +δ +\ +ζ +η +` +d + +h +f +l +Θ +p +√ +t + +x +Β +Γ +Δ +| +ǂ +ɛ +j +̧ +➢ +⁡ +̌ +′ +« +△ +▲ +# + +' +Ι ++ +¶ +/ +▼ +⇑ +□ +· +7 +▪ +; +? +➔ +∩ +C +÷ +G +⇒ +K + +O +S +С +W +Α +[ +○ +_ +● +‡ +c +z +g + +o + +〈 +〉 +s +⩽ +w +φ +ʹ +{ +» +∣ +̆ +e +ˆ +∈ +τ +◆ +ι +∅ +∆ +∙ +∘ +Ø +ß +✔ +∞ +∑ +− +× +◊ +∗ +∖ +˃ +˂ +∫ +" +i +& +π +↔ +* +∥ +æ +∧ +. +⁄ +ø +Q +∼ +6 +⁎ +: +★ +> +a +B +≈ +F +J +̄ +N +♯ +R +V + +― +Z +♣ +^ +¤ +¥ +§ + +¢ +£ +≦ +­ +≤ +‖ +Λ +© +n +↓ +→ +↑ +r +° +± +v + +♂ +k +♀ +~ +ᅟ +̇ +@ +” +♦ +ł +® +⊕ +„ +! + +% +⇓ +) +- +1 +5 +9 += +А +A +‰ +⋆ +Σ +E +◦ +I +※ +M +m +̨ +⩾ +† + +• +U +Y +
 +] +̸ +2 +‐ +– +‒ +̂ +— +̀ +́ +’ +‘ +⋮ +⋯ +̊ +“ +̈ +≧ +q +u +ı +y + +​ +̃ +} +ν diff --git a/modules/pytorchocr/utils/dict/table_structure_dict.txt b/modules/pytorchocr/utils/dict/table_structure_dict.txt new file mode 100644 index 0000000..9c4531e --- /dev/null +++ b/modules/pytorchocr/utils/dict/table_structure_dict.txt @@ -0,0 +1,2759 @@ +277 28 1267 1186 + +V +a +r +i +b +l +e + +H +z +d + +t +o +9 +5 +% +C +I + +p + +v +u +* +A +g +( +m +n +) +0 +. +7 +1 +6 +≤ +> +8 +3 +– +2 +G +4 +M +F +T +y +f +s +L +w +c +U +h +D +S +Q +R +x +P +- +E +O +/ +k +, ++ +N +K +q +′ +[ +] +< +≥ + +− + +μ +± +J +j +W +_ +Δ +B +“ +: +Y +α +λ +; + + +? +∼ += +° +# +̊ +̈ +̂ +’ +Z +X +∗ +— +β +' +† +~ +@ +" +γ +↓ +↑ +& +‡ +χ +” +σ +§ +| +¶ +‐ +× +$ +→ +√ +✓ +‘ +\ +∞ +π +• +® +^ +∆ +≧ + + +́ +♀ +♂ +‒ +⁎ +▲ +· +£ +φ +Ψ +ß +△ +☆ +▪ +η +€ +∧ +̃ +Φ +ρ +̄ +δ +‰ +̧ +Ω +♦ +{ +} +̀ +∑ +∫ +ø +κ +ε +¥ +※ +` +ω +Σ +➔ +‖ +Β +̸ +
 +─ +● +⩾ +Χ +Α +⋅ +◆ +★ +■ +ψ +ǂ +□ +ζ +! +Γ +↔ +θ +⁄ +〈 +〉 +― +υ +τ +⋆ +Ø +© +∥ +С +˂ +➢ +ɛ +⁡ +✗ +← +○ +¢ +⩽ +∖ +˃ +­ +≈ +Π +̌ +≦ +∅ +ᅟ + + +∣ +¤ +♯ +̆ +ξ +÷ +▼ + +ι +ν +║ + + +◦ +​ +◊ +∙ +« +» +ł +ı +Θ +∈ +„ +∘ +✔ +̇ +æ +ʹ +ˆ +♣ +⇓ +∩ +⊕ +⇒ +⇑ +̨ +Ι +Λ +⋯ +А +⋮ + + + + + + + + + + colspan="2" + colspan="3" + rowspan="2" + colspan="4" + colspan="6" + rowspan="3" + colspan="9" + colspan="10" + colspan="7" + rowspan="4" + rowspan="5" + rowspan="9" + colspan="8" + rowspan="8" + rowspan="6" + rowspan="7" + rowspan="10" +0 2924682 +1 3405345 +2 2363468 +3 2709165 +4 4078680 +5 3250792 +6 1923159 +7 1617890 +8 1450532 +9 1717624 +10 1477550 +11 1489223 +12 915528 +13 819193 +14 593660 +15 518924 +16 682065 +17 494584 +18 400591 +19 396421 +20 340994 +21 280688 +22 250328 +23 226786 +24 199927 +25 182707 +26 164629 +27 141613 +28 127554 +29 116286 +30 107682 +31 96367 +32 88002 +33 79234 +34 72186 +35 65921 +36 60374 +37 55976 +38 52166 +39 47414 +40 44932 +41 41279 +42 38232 +43 35463 +44 33703 +45 30557 +46 29639 +47 27000 +48 25447 +49 23186 +50 22093 +51 20412 +52 19844 +53 18261 +54 17561 +55 16499 +56 15597 +57 14558 +58 14372 +59 13445 +60 13514 +61 12058 +62 11145 +63 10767 +64 10370 +65 9630 +66 9337 +67 8881 +68 8727 +69 8060 +70 7994 +71 7740 +72 7189 +73 6729 +74 6749 +75 6548 +76 6321 +77 5957 +78 5740 +79 5407 +80 5370 +81 5035 +82 4921 +83 4656 +84 4600 +85 4519 +86 4277 +87 4023 +88 3939 +89 3910 +90 3861 +91 3560 +92 3483 +93 3406 +94 3346 +95 3229 +96 3122 +97 3086 +98 3001 +99 2884 +100 2822 +101 2677 +102 2670 +103 2610 +104 2452 +105 2446 +106 2400 +107 2300 +108 2316 +109 2196 +110 2089 +111 2083 +112 2041 +113 1881 +114 1838 +115 1896 +116 1795 +117 1786 +118 1743 +119 1765 +120 1750 +121 1683 +122 1563 +123 1499 +124 1513 +125 1462 +126 1388 +127 1441 +128 1417 +129 1392 +130 1306 +131 1321 +132 1274 +133 1294 +134 1240 +135 1126 +136 1157 +137 1130 +138 1084 +139 1130 +140 1083 +141 1040 +142 980 +143 1031 +144 974 +145 980 +146 932 +147 898 +148 960 +149 907 +150 852 +151 912 +152 859 +153 847 +154 876 +155 792 +156 791 +157 765 +158 788 +159 787 +160 744 +161 673 +162 683 +163 697 +164 666 +165 680 +166 632 +167 677 +168 657 +169 618 +170 587 +171 585 +172 567 +173 549 +174 562 +175 548 +176 542 +177 539 +178 542 +179 549 +180 547 +181 526 +182 525 +183 514 +184 512 +185 505 +186 515 +187 467 +188 475 +189 458 +190 435 +191 443 +192 427 +193 424 +194 404 +195 389 +196 429 +197 404 +198 386 +199 351 +200 388 +201 408 +202 361 +203 346 +204 324 +205 361 +206 363 +207 364 +208 323 +209 336 +210 342 +211 315 +212 325 +213 328 +214 314 +215 327 +216 320 +217 300 +218 295 +219 315 +220 310 +221 295 +222 275 +223 248 +224 274 +225 232 +226 293 +227 259 +228 286 +229 263 +230 242 +231 214 +232 261 +233 231 +234 211 +235 250 +236 233 +237 206 +238 224 +239 210 +240 233 +241 223 +242 216 +243 222 +244 207 +245 212 +246 196 +247 205 +248 201 +249 202 +250 211 +251 201 +252 215 +253 179 +254 163 +255 179 +256 191 +257 188 +258 196 +259 150 +260 154 +261 176 +262 211 +263 166 +264 171 +265 165 +266 149 +267 182 +268 159 +269 161 +270 164 +271 161 +272 141 +273 151 +274 127 +275 129 +276 142 +277 158 +278 148 +279 135 +280 127 +281 134 +282 138 +283 131 +284 126 +285 125 +286 130 +287 126 +288 135 +289 125 +290 135 +291 131 +292 95 +293 135 +294 106 +295 117 +296 136 +297 128 +298 128 +299 118 +300 109 +301 112 +302 117 +303 108 +304 120 +305 100 +306 95 +307 108 +308 112 +309 77 +310 120 +311 104 +312 109 +313 89 +314 98 +315 82 +316 98 +317 93 +318 77 +319 93 +320 77 +321 98 
+322 93 +323 86 +324 89 +325 73 +326 70 +327 71 +328 77 +329 87 +330 77 +331 93 +332 100 +333 83 +334 72 +335 74 +336 69 +337 77 +338 68 +339 78 +340 90 +341 98 +342 75 +343 80 +344 63 +345 71 +346 83 +347 66 +348 71 +349 70 +350 62 +351 62 +352 59 +353 63 +354 62 +355 52 +356 64 +357 64 +358 56 +359 49 +360 57 +361 63 +362 60 +363 68 +364 62 +365 55 +366 54 +367 40 +368 75 +369 70 +370 53 +371 58 +372 57 +373 55 +374 69 +375 57 +376 53 +377 43 +378 45 +379 47 +380 56 +381 51 +382 59 +383 51 +384 43 +385 34 +386 57 +387 49 +388 39 +389 46 +390 48 +391 43 +392 40 +393 54 +394 50 +395 41 +396 43 +397 33 +398 27 +399 49 +400 44 +401 44 +402 38 +403 30 +404 32 +405 37 +406 39 +407 42 +408 53 +409 39 +410 34 +411 31 +412 32 +413 52 +414 27 +415 41 +416 34 +417 36 +418 50 +419 35 +420 32 +421 33 +422 45 +423 35 +424 40 +425 29 +426 41 +427 40 +428 39 +429 32 +430 31 +431 34 +432 29 +433 27 +434 26 +435 22 +436 34 +437 28 +438 30 +439 38 +440 35 +441 36 +442 36 +443 27 +444 24 +445 33 +446 31 +447 25 +448 33 +449 27 +450 32 +451 46 +452 31 +453 35 +454 35 +455 34 +456 26 +457 21 +458 25 +459 26 +460 24 +461 27 +462 33 +463 30 +464 35 +465 21 +466 32 +467 19 +468 27 +469 16 +470 28 +471 26 +472 27 +473 26 +474 25 +475 25 +476 27 +477 20 +478 28 +479 22 +480 23 +481 16 +482 25 +483 27 +484 19 +485 23 +486 19 +487 15 +488 15 +489 23 +490 24 +491 19 +492 20 +493 18 +494 17 +495 30 +496 28 +497 20 +498 29 +499 17 +500 19 +501 21 +502 15 +503 24 +504 15 +505 19 +506 25 +507 16 +508 23 +509 26 +510 21 +511 15 +512 12 +513 16 +514 18 +515 24 +516 26 +517 18 +518 8 +519 25 +520 14 +521 8 +522 24 +523 20 +524 18 +525 15 +526 13 +527 17 +528 18 +529 22 +530 21 +531 9 +532 16 +533 17 +534 13 +535 17 +536 15 +537 13 +538 20 +539 13 +540 19 +541 29 +542 10 +543 8 +544 18 +545 13 +546 9 +547 18 +548 10 +549 18 +550 18 +551 9 +552 9 +553 15 +554 13 +555 15 +556 14 +557 14 +558 18 +559 8 +560 13 +561 9 +562 7 +563 12 +564 6 +565 9 +566 9 +567 18 +568 9 +569 10 +570 13 +571 14 +572 13 +573 21 +574 8 +575 16 +576 12 +577 9 +578 16 +579 17 +580 22 +581 6 +582 14 +583 13 +584 15 +585 11 +586 13 +587 5 +588 12 +589 13 +590 15 +591 13 +592 15 +593 12 +594 7 +595 18 +596 12 +597 13 +598 13 +599 13 +600 12 +601 12 +602 10 +603 11 +604 6 +605 6 +606 2 +607 9 +608 8 +609 12 +610 9 +611 12 +612 13 +613 12 +614 14 +615 9 +616 8 +617 9 +618 14 +619 13 +620 12 +621 6 +622 8 +623 8 +624 8 +625 12 +626 8 +627 7 +628 5 +629 8 +630 12 +631 6 +632 10 +633 10 +634 7 +635 8 +636 9 +637 6 +638 9 +639 4 +640 12 +641 4 +642 3 +643 11 +644 10 +645 6 +646 12 +647 12 +648 4 +649 4 +650 9 +651 8 +652 6 +653 5 +654 14 +655 10 +656 11 +657 8 +658 5 +659 5 +660 9 +661 13 +662 4 +663 5 +664 9 +665 11 +666 12 +667 7 +668 13 +669 2 +670 1 +671 7 +672 7 +673 7 +674 10 +675 9 +676 6 +677 5 +678 7 +679 6 +680 3 +681 3 +682 4 +683 9 +684 8 +685 5 +686 3 +687 11 +688 9 +689 2 +690 6 +691 5 +692 9 +693 5 +694 6 +695 5 +696 9 +697 8 +698 3 +699 7 +700 5 +701 9 +702 8 +703 7 +704 2 +705 3 +706 7 +707 6 +708 6 +709 10 +710 2 +711 10 +712 6 +713 7 +714 5 +715 6 +716 4 +717 6 +718 8 +719 4 +720 6 +721 7 +722 5 +723 7 +724 3 +725 10 +726 10 +727 3 +728 7 +729 7 +730 5 +731 2 +732 1 +733 5 +734 1 +735 5 +736 6 +737 2 +738 2 +739 3 +740 7 +741 2 +742 7 +743 4 +744 5 +745 4 +746 5 +747 3 +748 1 +749 4 +750 4 +751 2 +752 4 +753 6 +754 6 +755 6 +756 3 +757 2 +758 5 +759 5 +760 3 +761 4 +762 2 +763 1 +764 8 +765 3 +766 4 +767 3 +768 1 +769 5 +770 3 +771 3 +772 4 +773 4 +774 1 +775 3 +776 2 +777 2 +778 3 +779 3 +780 1 +781 4 +782 3 +783 4 +784 6 +785 3 +786 5 +787 
4 +788 2 +789 4 +790 5 +791 4 +792 6 +794 4 +795 1 +796 1 +797 4 +798 2 +799 3 +800 3 +801 1 +802 5 +803 5 +804 3 +805 3 +806 3 +807 4 +808 4 +809 2 +811 5 +812 4 +813 6 +814 3 +815 2 +816 2 +817 3 +818 5 +819 3 +820 1 +821 1 +822 4 +823 3 +824 4 +825 8 +826 3 +827 5 +828 5 +829 3 +830 6 +831 3 +832 4 +833 8 +834 5 +835 3 +836 3 +837 2 +838 4 +839 2 +840 1 +841 3 +842 2 +843 1 +844 3 +846 4 +847 4 +848 3 +849 3 +850 2 +851 3 +853 1 +854 4 +855 4 +856 2 +857 4 +858 1 +859 2 +860 5 +861 1 +862 1 +863 4 +864 2 +865 2 +867 5 +868 1 +869 4 +870 1 +871 1 +872 1 +873 2 +875 5 +876 3 +877 1 +878 3 +879 3 +880 3 +881 2 +882 1 +883 6 +884 2 +885 2 +886 1 +887 1 +888 3 +889 2 +890 2 +891 3 +892 1 +893 3 +894 1 +895 5 +896 1 +897 3 +899 2 +900 2 +902 1 +903 2 +904 4 +905 4 +906 3 +907 1 +908 1 +909 2 +910 5 +911 2 +912 3 +914 1 +915 1 +916 2 +918 2 +919 2 +920 4 +921 4 +922 1 +923 1 +924 4 +925 5 +926 1 +928 2 +929 1 +930 1 +931 1 +932 1 +933 1 +934 2 +935 1 +936 1 +937 1 +938 2 +939 1 +941 1 +942 4 +944 2 +945 2 +946 2 +947 1 +948 1 +950 1 +951 2 +953 1 +954 2 +955 1 +956 1 +957 2 +958 1 +960 3 +962 4 +963 1 +964 1 +965 3 +966 2 +967 2 +968 1 +969 3 +970 3 +972 1 +974 4 +975 3 +976 3 +977 2 +979 2 +980 1 +981 1 +983 5 +984 1 +985 3 +986 1 +987 2 +988 4 +989 2 +991 2 +992 2 +993 1 +994 1 +996 2 +997 2 +998 1 +999 3 +1000 2 +1001 1 +1002 3 +1003 3 +1004 2 +1005 3 +1006 1 +1007 2 +1009 1 +1011 1 +1013 3 +1014 1 +1016 2 +1017 1 +1018 1 +1019 1 +1020 4 +1021 1 +1022 2 +1025 1 +1026 1 +1027 2 +1028 1 +1030 1 +1031 2 +1032 4 +1034 3 +1035 2 +1036 1 +1038 1 +1039 1 +1040 1 +1041 1 +1042 2 +1043 1 +1044 2 +1045 4 +1048 1 +1050 1 +1051 1 +1052 2 +1054 1 +1055 3 +1056 2 +1057 1 +1059 1 +1061 2 +1063 1 +1064 1 +1065 1 +1066 1 +1067 1 +1068 1 +1069 2 +1074 1 +1075 1 +1077 1 +1078 1 +1079 1 +1082 1 +1085 1 +1088 1 +1090 1 +1091 1 +1092 2 +1094 2 +1097 2 +1098 1 +1099 2 +1101 2 +1102 1 +1104 1 +1105 1 +1107 1 +1109 1 +1111 2 +1112 1 +1114 2 +1115 2 +1116 2 +1117 1 +1118 1 +1119 1 +1120 1 +1122 1 +1123 1 +1127 1 +1128 3 +1132 2 +1138 3 +1142 1 +1145 4 +1150 1 +1153 2 +1154 1 +1158 1 +1159 1 +1163 1 +1165 1 +1169 2 +1174 1 +1176 1 +1177 1 +1178 2 +1179 1 +1180 2 +1181 1 +1182 1 +1183 2 +1185 1 +1187 1 +1191 2 +1193 1 +1195 3 +1196 1 +1201 3 +1203 1 +1206 1 +1210 1 +1213 1 +1214 1 +1215 2 +1218 1 +1220 1 +1221 1 +1225 1 +1226 1 +1233 2 +1241 1 +1243 1 +1249 1 +1250 2 +1251 1 +1254 1 +1255 2 +1260 1 +1268 1 +1270 1 +1273 1 +1274 1 +1277 1 +1284 1 +1287 1 +1291 1 +1292 2 +1294 1 +1295 2 +1297 1 +1298 1 +1301 1 +1307 1 +1308 3 +1311 2 +1313 1 +1316 1 +1321 1 +1324 1 +1325 1 +1330 1 +1333 1 +1334 1 +1338 2 +1340 1 +1341 1 +1342 1 +1343 1 +1345 1 +1355 1 +1357 1 +1360 2 +1375 1 +1376 1 +1380 1 +1383 1 +1387 1 +1389 1 +1393 1 +1394 1 +1396 1 +1398 1 +1410 1 +1414 1 +1419 1 +1425 1 +1434 1 +1435 1 +1438 1 +1439 1 +1447 1 +1455 2 +1460 1 +1461 1 +1463 1 +1466 1 +1470 1 +1473 1 +1478 1 +1480 1 +1483 1 +1484 1 +1485 2 +1492 2 +1499 1 +1509 1 +1512 1 +1513 1 +1523 1 +1524 1 +1525 2 +1529 1 +1539 1 +1544 1 +1568 1 +1584 1 +1591 1 +1598 1 +1600 1 +1604 1 +1614 1 +1617 1 +1621 1 +1622 1 +1626 1 +1638 1 +1648 1 +1658 1 +1661 1 +1679 1 +1682 1 +1693 1 +1700 1 +1705 1 +1707 1 +1722 1 +1728 1 +1758 1 +1762 1 +1763 1 +1775 1 +1776 1 +1801 1 +1810 1 +1812 1 +1827 1 +1834 1 +1846 1 +1847 1 +1848 1 +1851 1 +1862 1 +1866 1 +1877 2 +1884 1 +1888 1 +1903 1 +1912 1 +1925 1 +1938 1 +1955 1 +1998 1 +2054 1 +2058 1 +2065 1 +2069 1 +2076 1 +2089 1 +2104 1 +2111 1 +2133 1 +2138 1 +2156 1 +2204 1 +2212 1 +2237 1 +2246 2 +2298 1 +2304 1 +2360 1 +2400 
1 +2481 1 +2544 1 +2586 1 +2622 1 +2666 1 +2682 1 +2725 1 +2920 1 +3997 1 +4019 1 +5211 1 +12 19 +14 1 +16 401 +18 2 +20 421 +22 557 +24 625 +26 50 +28 4481 +30 52 +32 550 +34 5840 +36 4644 +38 87 +40 5794 +41 33 +42 571 +44 11805 +46 4711 +47 7 +48 597 +49 12 +50 678 +51 2 +52 14715 +53 3 +54 7322 +55 3 +56 508 +57 39 +58 3486 +59 11 +60 8974 +61 45 +62 1276 +63 4 +64 15693 +65 15 +66 657 +67 13 +68 6409 +69 10 +70 3188 +71 25 +72 1889 +73 27 +74 10370 +75 9 +76 12432 +77 23 +78 520 +79 15 +80 1534 +81 29 +82 2944 +83 23 +84 12071 +85 36 +86 1502 +87 10 +88 10978 +89 11 +90 889 +91 16 +92 4571 +93 17 +94 7855 +95 21 +96 2271 +97 33 +98 1423 +99 15 +100 11096 +101 21 +102 4082 +103 13 +104 5442 +105 25 +106 2113 +107 26 +108 3779 +109 43 +110 1294 +111 29 +112 7860 +113 29 +114 4965 +115 22 +116 7898 +117 25 +118 1772 +119 28 +120 1149 +121 38 +122 1483 +123 32 +124 10572 +125 25 +126 1147 +127 31 +128 1699 +129 22 +130 5533 +131 22 +132 4669 +133 34 +134 3777 +135 10 +136 5412 +137 21 +138 855 +139 26 +140 2485 +141 46 +142 1970 +143 27 +144 6565 +145 40 +146 933 +147 15 +148 7923 +149 16 +150 735 +151 23 +152 1111 +153 33 +154 3714 +155 27 +156 2445 +157 30 +158 3367 +159 10 +160 4646 +161 27 +162 990 +163 23 +164 5679 +165 25 +166 2186 +167 17 +168 899 +169 32 +170 1034 +171 22 +172 6185 +173 32 +174 2685 +175 17 +176 1354 +177 38 +178 1460 +179 15 +180 3478 +181 20 +182 958 +183 20 +184 6055 +185 23 +186 2180 +187 15 +188 1416 +189 30 +190 1284 +191 22 +192 1341 +193 21 +194 2413 +195 18 +196 4984 +197 13 +198 830 +199 22 +200 1834 +201 19 +202 2238 +203 9 +204 3050 +205 22 +206 616 +207 17 +208 2892 +209 22 +210 711 +211 30 +212 2631 +213 19 +214 3341 +215 21 +216 987 +217 26 +218 823 +219 9 +220 3588 +221 20 +222 692 +223 7 +224 2925 +225 31 +226 1075 +227 16 +228 2909 +229 18 +230 673 +231 20 +232 2215 +233 14 +234 1584 +235 21 +236 1292 +237 29 +238 1647 +239 25 +240 1014 +241 30 +242 1648 +243 19 +244 4465 +245 10 +246 787 +247 11 +248 480 +249 25 +250 842 +251 15 +252 1219 +253 23 +254 1508 +255 8 +256 3525 +257 16 +258 490 +259 12 +260 1678 +261 14 +262 822 +263 16 +264 1729 +265 28 +266 604 +267 11 +268 2572 +269 7 +270 1242 +271 15 +272 725 +273 18 +274 1983 +275 13 +276 1662 +277 19 +278 491 +279 12 +280 1586 +281 14 +282 563 +283 10 +284 2363 +285 10 +286 656 +287 14 +288 725 +289 28 +290 871 +291 9 +292 2606 +293 12 +294 961 +295 9 +296 478 +297 13 +298 1252 +299 10 +300 736 +301 19 +302 466 +303 13 +304 2254 +305 12 +306 486 +307 14 +308 1145 +309 13 +310 955 +311 13 +312 1235 +313 13 +314 931 +315 14 +316 1768 +317 11 +318 330 +319 10 +320 539 +321 23 +322 570 +323 12 +324 1789 +325 13 +326 884 +327 5 +328 1422 +329 14 +330 317 +331 11 +332 509 +333 13 +334 1062 +335 12 +336 577 +337 27 +338 378 +339 10 +340 2313 +341 9 +342 391 +343 13 +344 894 +345 17 +346 664 +347 9 +348 453 +349 6 +350 363 +351 15 +352 1115 +353 13 +354 1054 +355 8 +356 1108 +357 12 +358 354 +359 7 +360 363 +361 16 +362 344 +363 11 +364 1734 +365 12 +366 265 +367 10 +368 969 +369 16 +370 316 +371 12 +372 757 +373 7 +374 563 +375 15 +376 857 +377 9 +378 469 +379 9 +380 385 +381 12 +382 921 +383 15 +384 764 +385 14 +386 246 +387 6 +388 1108 +389 14 +390 230 +391 8 +392 266 +393 11 +394 641 +395 8 +396 719 +397 9 +398 243 +399 4 +400 1108 +401 7 +402 229 +403 7 +404 903 +405 7 +406 257 +407 12 +408 244 +409 3 +410 541 +411 6 +412 744 +413 8 +414 419 +415 8 +416 388 +417 19 +418 470 +419 14 +420 612 +421 6 +422 342 +423 3 +424 1179 +425 3 +426 116 +427 14 +428 207 +429 6 +430 255 +431 4 +432 288 +433 12 
+434 343 +435 6 +436 1015 +437 3 +438 538 +439 10 +440 194 +441 6 +442 188 +443 15 +444 524 +445 7 +446 214 +447 7 +448 574 +449 6 +450 214 +451 5 +452 635 +453 9 +454 464 +455 5 +456 205 +457 9 +458 163 +459 2 +460 558 +461 4 +462 171 +463 14 +464 444 +465 11 +466 543 +467 5 +468 388 +469 6 +470 141 +471 4 +472 647 +473 3 +474 210 +475 4 +476 193 +477 7 +478 195 +479 7 +480 443 +481 10 +482 198 +483 3 +484 816 +485 6 +486 128 +487 9 +488 215 +489 9 +490 328 +491 7 +492 158 +493 11 +494 335 +495 8 +496 435 +497 6 +498 174 +499 1 +500 373 +501 5 +502 140 +503 7 +504 330 +505 9 +506 149 +507 5 +508 642 +509 3 +510 179 +511 3 +512 159 +513 8 +514 204 +515 7 +516 306 +517 4 +518 110 +519 5 +520 326 +521 6 +522 305 +523 6 +524 294 +525 7 +526 268 +527 5 +528 149 +529 4 +530 133 +531 2 +532 513 +533 10 +534 116 +535 5 +536 258 +537 4 +538 113 +539 4 +540 138 +541 6 +542 116 +544 485 +545 4 +546 93 +547 9 +548 299 +549 3 +550 256 +551 6 +552 92 +553 3 +554 175 +555 6 +556 253 +557 7 +558 95 +559 2 +560 128 +561 4 +562 206 +563 2 +564 465 +565 3 +566 69 +567 3 +568 157 +569 7 +570 97 +571 8 +572 118 +573 5 +574 130 +575 4 +576 301 +577 6 +578 177 +579 2 +580 397 +581 3 +582 80 +583 1 +584 128 +585 5 +586 52 +587 2 +588 72 +589 1 +590 84 +591 6 +592 323 +593 11 +594 77 +595 5 +596 205 +597 1 +598 244 +599 4 +600 69 +601 3 +602 89 +603 5 +604 254 +605 6 +606 147 +607 3 +608 83 +609 3 +610 77 +611 3 +612 194 +613 1 +614 98 +615 3 +616 243 +617 3 +618 50 +619 8 +620 188 +621 4 +622 67 +623 4 +624 123 +625 2 +626 50 +627 1 +628 239 +629 2 +630 51 +631 4 +632 65 +633 5 +634 188 +636 81 +637 3 +638 46 +639 3 +640 103 +641 1 +642 136 +643 3 +644 188 +645 3 +646 58 +648 122 +649 4 +650 47 +651 2 +652 155 +653 4 +654 71 +655 1 +656 71 +657 3 +658 50 +659 2 +660 177 +661 5 +662 66 +663 2 +664 183 +665 3 +666 50 +667 2 +668 53 +669 2 +670 115 +672 66 +673 2 +674 47 +675 1 +676 197 +677 2 +678 46 +679 3 +680 95 +681 3 +682 46 +683 3 +684 107 +685 1 +686 86 +687 2 +688 158 +689 4 +690 51 +691 1 +692 80 +694 56 +695 4 +696 40 +698 43 +699 3 +700 95 +701 2 +702 51 +703 2 +704 133 +705 1 +706 100 +707 2 +708 121 +709 2 +710 15 +711 3 +712 35 +713 2 +714 20 +715 3 +716 37 +717 2 +718 78 +720 55 +721 1 +722 42 +723 2 +724 218 +725 3 +726 23 +727 2 +728 26 +729 1 +730 64 +731 2 +732 65 +734 24 +735 2 +736 53 +737 1 +738 32 +739 1 +740 60 +742 81 +743 1 +744 77 +745 1 +746 47 +747 1 +748 62 +749 1 +750 19 +751 1 +752 86 +753 3 +754 40 +756 55 +757 2 +758 38 +759 1 +760 101 +761 1 +762 22 +764 67 +765 2 +766 35 +767 1 +768 38 +769 1 +770 22 +771 1 +772 82 +773 1 +774 73 +776 29 +777 1 +778 55 +780 23 +781 1 +782 16 +784 84 +785 3 +786 28 +788 59 +789 1 +790 33 +791 3 +792 24 +794 13 +795 1 +796 110 +797 2 +798 15 +800 22 +801 3 +802 29 +803 1 +804 87 +806 21 +808 29 +810 48 +812 28 +813 1 +814 58 +815 1 +816 48 +817 1 +818 31 +819 1 +820 66 +822 17 +823 2 +824 58 +826 10 +827 2 +828 25 +829 1 +830 29 +831 1 +832 63 +833 1 +834 26 +835 3 +836 52 +837 1 +838 18 +840 27 +841 2 +842 12 +843 1 +844 83 +845 1 +846 7 +847 1 +848 10 +850 26 +852 25 +853 1 +854 15 +856 27 +858 32 +859 1 +860 15 +862 43 +864 32 +865 1 +866 6 +868 39 +870 11 +872 25 +873 1 +874 10 +875 1 +876 20 +877 2 +878 19 +879 1 +880 30 +882 11 +884 53 +886 25 +887 1 +888 28 +890 6 +892 36 +894 10 +896 13 +898 14 +900 31 +902 14 +903 2 +904 43 +906 25 +908 9 +910 11 +911 1 +912 16 +913 1 +914 24 +916 27 +918 6 +920 15 +922 27 +923 1 +924 23 +926 13 +928 42 +929 1 +930 3 +932 27 +934 17 +936 8 +937 1 +938 11 +940 33 +942 4 +943 1 +944 18 +946 15 +948 13 +950 
18 +952 12 +954 11 +956 21 +958 10 +960 13 +962 5 +964 32 +966 13 +968 8 +970 8 +971 1 +972 23 +973 2 +974 12 +975 1 +976 22 +978 7 +979 1 +980 14 +982 8 +984 22 +985 1 +986 6 +988 17 +989 1 +990 6 +992 13 +994 19 +996 11 +998 4 +1000 9 +1002 2 +1004 14 +1006 5 +1008 3 +1010 9 +1012 29 +1014 6 +1016 22 +1017 1 +1018 8 +1019 1 +1020 7 +1022 6 +1023 1 +1024 10 +1026 2 +1028 8 +1030 11 +1031 2 +1032 8 +1034 9 +1036 13 +1038 12 +1040 12 +1042 3 +1044 12 +1046 3 +1048 11 +1050 2 +1051 1 +1052 2 +1054 11 +1056 6 +1058 8 +1059 1 +1060 23 +1062 6 +1063 1 +1064 8 +1066 3 +1068 6 +1070 8 +1071 1 +1072 5 +1074 3 +1076 5 +1078 3 +1080 11 +1081 1 +1082 7 +1084 18 +1086 4 +1087 1 +1088 3 +1090 3 +1092 7 +1094 3 +1096 12 +1098 6 +1099 1 +1100 2 +1102 6 +1104 14 +1106 3 +1108 6 +1110 5 +1112 2 +1114 8 +1116 3 +1118 3 +1120 7 +1122 10 +1124 6 +1126 8 +1128 1 +1130 4 +1132 3 +1134 2 +1136 5 +1138 5 +1140 8 +1142 3 +1144 7 +1146 3 +1148 11 +1150 1 +1152 5 +1154 1 +1156 5 +1158 1 +1160 5 +1162 3 +1164 6 +1165 1 +1166 1 +1168 4 +1169 1 +1170 3 +1171 1 +1172 2 +1174 5 +1176 3 +1177 1 +1180 8 +1182 2 +1184 4 +1186 2 +1188 3 +1190 2 +1192 5 +1194 6 +1196 1 +1198 2 +1200 2 +1204 10 +1206 2 +1208 9 +1210 1 +1214 6 +1216 3 +1218 4 +1220 9 +1221 2 +1222 1 +1224 5 +1226 4 +1228 8 +1230 1 +1232 1 +1234 3 +1236 5 +1240 3 +1242 1 +1244 3 +1245 1 +1246 4 +1248 6 +1250 2 +1252 7 +1256 3 +1258 2 +1260 2 +1262 3 +1264 4 +1265 1 +1266 1 +1270 1 +1271 1 +1272 2 +1274 3 +1276 3 +1278 1 +1280 3 +1284 1 +1286 1 +1290 1 +1292 3 +1294 1 +1296 7 +1300 2 +1302 4 +1304 3 +1306 2 +1308 2 +1312 1 +1314 1 +1316 3 +1318 2 +1320 1 +1324 8 +1326 1 +1330 1 +1331 1 +1336 2 +1338 1 +1340 3 +1341 1 +1344 1 +1346 2 +1347 1 +1348 3 +1352 1 +1354 2 +1356 1 +1358 1 +1360 3 +1362 1 +1364 4 +1366 1 +1370 1 +1372 3 +1380 2 +1384 2 +1388 2 +1390 2 +1392 2 +1394 1 +1396 1 +1398 1 +1400 2 +1402 1 +1404 1 +1406 1 +1410 1 +1412 5 +1418 1 +1420 1 +1424 1 +1432 2 +1434 2 +1442 3 +1444 5 +1448 1 +1454 1 +1456 1 +1460 3 +1462 4 +1468 1 +1474 1 +1476 1 +1478 2 +1480 1 +1486 2 +1488 1 +1492 1 +1496 1 +1500 3 +1503 1 +1506 1 +1512 2 +1516 1 +1522 1 +1524 2 +1534 4 +1536 1 +1538 1 +1540 2 +1544 2 +1548 1 +1556 1 +1560 1 +1562 1 +1564 2 +1566 1 +1568 1 +1570 1 +1572 1 +1576 1 +1590 1 +1594 1 +1604 1 +1608 1 +1614 1 +1622 1 +1624 2 +1628 1 +1629 1 +1636 1 +1642 1 +1654 2 +1660 1 +1664 1 +1670 1 +1684 4 +1698 1 +1732 3 +1742 1 +1752 1 +1760 1 +1764 1 +1772 2 +1798 1 +1808 1 +1820 1 +1852 1 +1856 1 +1874 1 +1902 1 +1908 1 +1952 1 +2004 1 +2018 1 +2020 1 +2028 1 +2174 1 +2233 1 +2244 1 +2280 1 +2290 1 +2352 1 +2604 1 +4190 1 diff --git a/modules/pytorchocr/utils/dict/te_dict.txt b/modules/pytorchocr/utils/dict/te_dict.txt new file mode 100644 index 0000000..83d74cc --- /dev/null +++ b/modules/pytorchocr/utils/dict/te_dict.txt @@ -0,0 +1,151 @@ +t +e +_ +i +m +g +/ +5 +I +L +S +V +R +C +2 +0 +1 +v +a +l +3 +4 +8 +9 +. +j +p +త +ె +ర +క +్ +ి +ం +చ +ే +ద +ు +7 +6 +ఉ +ా +మ +ట +ో +వ +ప +ల +శ +ఆ +య +ై +భ +' +ీ +గ +ూ +డ +ధ +హ +న +జ +స +[ +‌ +ష +అ +ణ +ఫ +బ +ఎ +; +ళ +థ +ొ +ఠ +ృ +ఒ +ఇ +ః +ఊ +ఖ +- +ఐ +ఘ +ౌ +ఏ +ఈ +ఛ +, +ఓ +ఞ +| +? +: +ఢ +" +( +” +! 
++ +) +* += +& +“ +€ +] +£ +$ +s +c +n +w +k +J +G +u +d +r +E +o +h +y +b +f +B +M +O +T +N +D +P +A +F +x +W +Y +U +H +K +X +z +Z +Q +q +É +% +# +@ +é diff --git a/modules/pytorchocr/utils/dict/ug_dict.txt b/modules/pytorchocr/utils/dict/ug_dict.txt new file mode 100644 index 0000000..77602f2 --- /dev/null +++ b/modules/pytorchocr/utils/dict/ug_dict.txt @@ -0,0 +1,114 @@ +u +g +_ +i +m +/ +1 +I +L +S +V +R +C +2 +0 +v +a +l +8 +5 +3 +6 +9 +. +j +p + +ق +ا +پ +ل +4 +7 +ئ +ى +ش +ت +ي +ك +د +ف +ر +و +ن +ب +ە +خ +ې +چ +ۇ +ز +س +م +ۋ +گ +ڭ +ۆ +ۈ +ج +غ +ھ +ژ +s +c +e +n +w +P +E +D +U +d +r +b +y +B +o +O +Y +N +T +k +t +h +A +H +F +z +W +K +G +M +f +Z +X +Q +J +x +q +- +! +% +# +? +: +$ +, +& +' +É +@ +é +( ++ diff --git a/modules/pytorchocr/utils/dict/uk_dict.txt b/modules/pytorchocr/utils/dict/uk_dict.txt new file mode 100644 index 0000000..c5ffc0a --- /dev/null +++ b/modules/pytorchocr/utils/dict/uk_dict.txt @@ -0,0 +1,142 @@ +u +k +_ +i +m +g +/ +1 +6 +I +L +S +V +R +C +2 +0 +v +a +l +7 +9 +. +j +p +в +і +д +п +о +н +с +т +ю +4 +5 +3 +а +и +м +е +р +ч +у +Б +з +л +к +8 +А +В +г +є +б +ь +х +ґ +ш +ц +ф +я +щ +ж +Г +Х +У +Т +Е +І +Н +П +З +Л +Ю +С +Д +М +К +Р +Ф +О +Ц +И +Я +Ч +Ш +Ж +Є +Ґ +Ь +s +c +e +n +w +A +P +r +E +t +o +h +d +y +M +G +N +F +B +T +D +U +O +W +Z +f +H +Y +b +K +z +x +Q +X +q +J +$ +- +' +# +& +% +? +: +! +, ++ +@ +( +é +É + diff --git a/modules/pytorchocr/utils/dict/ur_dict.txt b/modules/pytorchocr/utils/dict/ur_dict.txt new file mode 100644 index 0000000..c06786a --- /dev/null +++ b/modules/pytorchocr/utils/dict/ur_dict.txt @@ -0,0 +1,137 @@ +u +r +_ +i +m +g +/ +3 +I +L +S +V +R +C +2 +0 +1 +v +a +l +9 +7 +8 +. +j +p + +چ +ٹ +پ +ا +ئ +ی +ے +4 +6 +و +ل +ن +ڈ +ھ +ک +ت +ش +ف +ق +ر +د +5 +ب +ج +خ +ہ +س +ز +غ +ڑ +ں +آ +م +ؤ +ط +ص +ح +ع +گ +ث +ض +ذ +ۓ +ِ +ء +ظ +ً +ي +ُ +ۃ +أ +ٰ +ە +ژ +ۂ +ة +ّ +ك +ه +s +c +e +n +w +o +d +t +D +M +T +U +E +b +P +h +y +W +H +A +x +B +O +N +G +Y +Q +F +k +K +q +J +Z +f +z +X +' +@ +& +! +, +: +$ +- +# +? +% +é ++ +( +É diff --git a/modules/pytorchocr/utils/dict/xi_dict.txt b/modules/pytorchocr/utils/dict/xi_dict.txt new file mode 100644 index 0000000..f195f1e --- /dev/null +++ b/modules/pytorchocr/utils/dict/xi_dict.txt @@ -0,0 +1,110 @@ +x +i +_ +m +g +/ +1 +0 +I +L +S +V +R +C +2 +v +a +l +3 +6 +4 +5 +. +j +p + +Q +u +e +r +o +8 +7 +n +c +9 +t +b +é +q +d +ó +y +F +s +, +O +í +T +f +" +U +M +h +: +P +H +A +E +D +z +N +á +ñ +ú +% +; +è ++ +Y +- +B +G +( +) +¿ +? +w +¡ +! +X +É +K +k +Á +ü +Ú +« +» +J +' +ö +W +Z +º +Ö +­ +[ +] +Ç +ç +à +ä +û +ò +Í +ê +ô +ø +ª diff --git a/modules/pytorchocr/utils/dict90.txt b/modules/pytorchocr/utils/dict90.txt new file mode 100644 index 0000000..a945ae9 --- /dev/null +++ b/modules/pytorchocr/utils/dict90.txt @@ -0,0 +1,90 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +: +; +< += +> +? 
+@ +[ +\ +] +_ +` +~ \ No newline at end of file diff --git a/modules/pytorchocr/utils/e2e_utils/extract_batchsize.py b/modules/pytorchocr/utils/e2e_utils/extract_batchsize.py new file mode 100644 index 0000000..6c6a8bf --- /dev/null +++ b/modules/pytorchocr/utils/e2e_utils/extract_batchsize.py @@ -0,0 +1,88 @@ +raise ValueError('utils -> e2e_utils -> extract_batchsize') +import paddle +import numpy as np +import copy + + +def org_tcl_rois(batch_size, pos_lists, pos_masks, label_lists, tcl_bs): + """ + """ + pos_lists_, pos_masks_, label_lists_ = [], [], [] + img_bs = batch_size + ngpu = int(batch_size / img_bs) + img_ids = np.array(pos_lists, dtype=np.int32)[:, 0, 0].copy() + pos_lists_split, pos_masks_split, label_lists_split = [], [], [] + for i in range(ngpu): + pos_lists_split.append([]) + pos_masks_split.append([]) + label_lists_split.append([]) + + for i in range(img_ids.shape[0]): + img_id = img_ids[i] + gpu_id = int(img_id / img_bs) + img_id = img_id % img_bs + pos_list = pos_lists[i].copy() + pos_list[:, 0] = img_id + pos_lists_split[gpu_id].append(pos_list) + pos_masks_split[gpu_id].append(pos_masks[i].copy()) + label_lists_split[gpu_id].append(copy.deepcopy(label_lists[i])) + # repeat or delete + for i in range(ngpu): + vp_len = len(pos_lists_split[i]) + if vp_len <= tcl_bs: + for j in range(0, tcl_bs - vp_len): + pos_list = pos_lists_split[i][j].copy() + pos_lists_split[i].append(pos_list) + pos_mask = pos_masks_split[i][j].copy() + pos_masks_split[i].append(pos_mask) + label_list = copy.deepcopy(label_lists_split[i][j]) + label_lists_split[i].append(label_list) + else: + for j in range(0, vp_len - tcl_bs): + c_len = len(pos_lists_split[i]) + pop_id = np.random.permutation(c_len)[0] + pos_lists_split[i].pop(pop_id) + pos_masks_split[i].pop(pop_id) + label_lists_split[i].pop(pop_id) + # merge + for i in range(ngpu): + pos_lists_.extend(pos_lists_split[i]) + pos_masks_.extend(pos_masks_split[i]) + label_lists_.extend(label_lists_split[i]) + return pos_lists_, pos_masks_, label_lists_ + + +def pre_process(label_list, pos_list, pos_mask, max_text_length, max_text_nums, + pad_num, tcl_bs): + label_list = label_list.numpy() + batch, _, _, _ = label_list.shape + pos_list = pos_list.numpy() + pos_mask = pos_mask.numpy() + pos_list_t = [] + pos_mask_t = [] + label_list_t = [] + for i in range(batch): + for j in range(max_text_nums): + if pos_mask[i, j].any(): + pos_list_t.append(pos_list[i][j]) + pos_mask_t.append(pos_mask[i][j]) + label_list_t.append(label_list[i][j]) + pos_list, pos_mask, label_list = org_tcl_rois(batch, pos_list_t, pos_mask_t, + label_list_t, tcl_bs) + label = [] + tt = [l.tolist() for l in label_list] + for i in range(tcl_bs): + k = 0 + for j in range(max_text_length): + if tt[i][j][0] != pad_num: + k += 1 + else: + break + label.append(k) + label = paddle.to_tensor(label) + label = paddle.cast(label, dtype='int64') + pos_list = paddle.to_tensor(pos_list) + pos_mask = paddle.to_tensor(pos_mask) + label_list = paddle.squeeze(paddle.to_tensor(label_list), axis=2) + label_list = paddle.cast(label_list, dtype='int32') + return pos_list, pos_mask, label_list, label diff --git a/modules/pytorchocr/utils/e2e_utils/extract_textpoint_fast.py b/modules/pytorchocr/utils/e2e_utils/extract_textpoint_fast.py new file mode 100644 index 0000000..787cd30 --- /dev/null +++ b/modules/pytorchocr/utils/e2e_utils/extract_textpoint_fast.py @@ -0,0 +1,457 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains various CTC decoders.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import cv2 +import math + +import numpy as np +from itertools import groupby +from skimage.morphology._skeletonize import thin + + +def get_dict(character_dict_path): + character_str = "" + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + for line in lines: + line = line.decode('utf-8').strip("\n").strip("\r\n") + character_str += line + dict_character = list(character_str) + return dict_character + + +def softmax(logits): + """ + logits: N x d + """ + max_value = np.max(logits, axis=1, keepdims=True) + exp = np.exp(logits - max_value) + exp_sum = np.sum(exp, axis=1, keepdims=True) + dist = exp / exp_sum + return dist + + +def get_keep_pos_idxs(labels, remove_blank=None): + """ + Remove duplicate and get pos idxs of keep items. + The value of keep_blank should be [None, 95]. + """ + duplicate_len_list = [] + keep_pos_idx_list = [] + keep_char_idx_list = [] + for k, v_ in groupby(labels): + current_len = len(list(v_)) + if k != remove_blank: + current_idx = int(sum(duplicate_len_list) + current_len // 2) + keep_pos_idx_list.append(current_idx) + keep_char_idx_list.append(k) + duplicate_len_list.append(current_len) + return keep_char_idx_list, keep_pos_idx_list + + +def remove_blank(labels, blank=0): + new_labels = [x for x in labels if x != blank] + return new_labels + + +def insert_blank(labels, blank=0): + new_labels = [blank] + for l in labels: + new_labels += [l, blank] + return new_labels + + +def ctc_greedy_decoder(probs_seq, blank=95, keep_blank_in_idxs=True): + """ + CTC greedy (best path) decoder. + """ + raw_str = np.argmax(np.array(probs_seq), axis=1) + remove_blank_in_pos = None if keep_blank_in_idxs else blank + dedup_str, keep_idx_list = get_keep_pos_idxs( + raw_str, remove_blank=remove_blank_in_pos) + dst_str = remove_blank(dedup_str, blank=blank) + return dst_str, keep_idx_list + + +def instance_ctc_greedy_decoder(gather_info, logits_map, pts_num=4): + _, _, C = logits_map.shape + ys, xs = zip(*gather_info) + logits_seq = logits_map[list(ys), list(xs)] + probs_seq = logits_seq + labels = np.argmax(probs_seq, axis=1) + dst_str = [k for k, v_ in groupby(labels) if k != C - 1] + detal = len(gather_info) // (pts_num - 1) + keep_idx_list = [0] + [detal * (i + 1) for i in range(pts_num - 2)] + [-1] + keep_gather_list = [gather_info[idx] for idx in keep_idx_list] + return dst_str, keep_gather_list + + +def ctc_decoder_for_image(gather_info_list, + logits_map, + Lexicon_Table, + pts_num=6): + """ + CTC decoder using multiple processes. 
+ """ + decoder_str = [] + decoder_xys = [] + for gather_info in gather_info_list: + if len(gather_info) < pts_num: + continue + dst_str, xys_list = instance_ctc_greedy_decoder( + gather_info, logits_map, pts_num=pts_num) + dst_str_readable = ''.join([Lexicon_Table[idx] for idx in dst_str]) + if len(dst_str_readable) < 2: + continue + decoder_str.append(dst_str_readable) + decoder_xys.append(xys_list) + return decoder_str, decoder_xys + + +def sort_with_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + + def sort_part_with_direction(pos_list, point_direction): + pos_list = np.array(pos_list).reshape(-1, 2) + point_direction = np.array(point_direction).reshape(-1, 2) + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list[np.argsort(pos_proj_leng)].tolist() + sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist() + return sorted_list, sorted_direction + + pos_list = np.array(pos_list).reshape(-1, 2) + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + sorted_point, sorted_direction = sort_part_with_direction(pos_list, + point_direction) + + point_num = len(sorted_point) + if point_num >= 16: + middle_num = point_num // 2 + first_part_point = sorted_point[:middle_num] + first_point_direction = sorted_direction[:middle_num] + sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction( + first_part_point, first_point_direction) + + last_part_point = sorted_point[middle_num:] + last_point_direction = sorted_direction[middle_num:] + sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction( + last_part_point, last_point_direction) + sorted_point = sorted_fist_part_point + sorted_last_part_point + sorted_direction = sorted_fist_part_direction + sorted_last_part_direction + + return sorted_point, np.array(sorted_direction) + + +def add_id(pos_list, image_id=0): + """ + Add id for gather feature, for inference. + """ + new_list = [] + for item in pos_list: + new_list.append((image_id, item[0], item[1])) + return new_list + + +def sort_and_expand_with_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] 
+ """ + h, w, _ = f_direction.shape + sorted_list, point_direction = sort_with_direction(pos_list, f_direction) + + point_num = len(sorted_list) + sub_direction_len = max(point_num // 3, 2) + left_direction = point_direction[:sub_direction_len, :] + right_dirction = point_direction[point_num - sub_direction_len:, :] + + left_average_direction = -np.mean(left_direction, axis=0, keepdims=True) + left_average_len = np.linalg.norm(left_average_direction) + left_start = np.array(sorted_list[0]) + left_step = left_average_direction / (left_average_len + 1e-6) + + right_average_direction = np.mean(right_dirction, axis=0, keepdims=True) + right_average_len = np.linalg.norm(right_average_direction) + right_step = right_average_direction / (right_average_len + 1e-6) + right_start = np.array(sorted_list[-1]) + + append_num = max( + int((left_average_len + right_average_len) / 2.0 * 0.15), 1) + left_list = [] + right_list = [] + for i in range(append_num): + ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ly < h and lx < w and (ly, lx) not in left_list: + left_list.append((ly, lx)) + ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ry < h and rx < w and (ry, rx) not in right_list: + right_list.append((ry, rx)) + + all_list = left_list[::-1] + sorted_list + right_list + return all_list + + +def sort_and_expand_with_direction_v2(pos_list, f_direction, binary_tcl_map): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + binary_tcl_map: h x w + """ + h, w, _ = f_direction.shape + sorted_list, point_direction = sort_with_direction(pos_list, f_direction) + + point_num = len(sorted_list) + sub_direction_len = max(point_num // 3, 2) + left_direction = point_direction[:sub_direction_len, :] + right_dirction = point_direction[point_num - sub_direction_len:, :] + + left_average_direction = -np.mean(left_direction, axis=0, keepdims=True) + left_average_len = np.linalg.norm(left_average_direction) + left_start = np.array(sorted_list[0]) + left_step = left_average_direction / (left_average_len + 1e-6) + + right_average_direction = np.mean(right_dirction, axis=0, keepdims=True) + right_average_len = np.linalg.norm(right_average_direction) + right_step = right_average_direction / (right_average_len + 1e-6) + right_start = np.array(sorted_list[-1]) + + append_num = max( + int((left_average_len + right_average_len) / 2.0 * 0.15), 1) + max_append_num = 2 * append_num + + left_list = [] + right_list = [] + for i in range(max_append_num): + ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ly < h and lx < w and (ly, lx) not in left_list: + if binary_tcl_map[ly, lx] > 0.5: + left_list.append((ly, lx)) + else: + break + + for i in range(max_append_num): + ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ry < h and rx < w and (ry, rx) not in right_list: + if binary_tcl_map[ry, rx] > 0.5: + right_list.append((ry, rx)) + else: + break + + all_list = left_list[::-1] + sorted_list + right_list + return all_list + + +def point_pair2poly(point_pair_list): + """ + Transfer vertical point_pairs into poly point in clockwise. 
+ """ + point_num = len(point_pair_list) * 2 + point_list = [0] * point_num + for idx, point_pair in enumerate(point_pair_list): + point_list[idx] = point_pair[0] + point_list[point_num - 1 - idx] = point_pair[1] + return np.array(point_list).reshape(-1, 2) + + +def shrink_quad_along_width(quad, begin_width_ratio=0., end_width_ratio=1.): + ratio_pair = np.array( + [[begin_width_ratio], [end_width_ratio]], dtype=np.float32) + p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair + p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair + return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]]) + + +def expand_poly_along_width(poly, shrink_ratio_of_width=0.3): + """ + expand poly along width. + """ + point_num = poly.shape[0] + left_quad = np.array( + [poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32) + left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \ + (np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6) + left_quad_expand = shrink_quad_along_width(left_quad, left_ratio, 1.0) + right_quad = np.array( + [ + poly[point_num // 2 - 2], poly[point_num // 2 - 1], + poly[point_num // 2], poly[point_num // 2 + 1] + ], + dtype=np.float32) + right_ratio = 1.0 + shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \ + (np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6) + right_quad_expand = shrink_quad_along_width(right_quad, 0.0, right_ratio) + poly[0] = left_quad_expand[0] + poly[-1] = left_quad_expand[-1] + poly[point_num // 2 - 1] = right_quad_expand[1] + poly[point_num // 2] = right_quad_expand[2] + return poly + + +def restore_poly(instance_yxs_list, seq_strs, p_border, ratio_w, ratio_h, src_w, + src_h, valid_set): + poly_list = [] + keep_str_list = [] + for yx_center_line, keep_str in zip(instance_yxs_list, seq_strs): + if len(keep_str) < 2: + print('--> too short, {}'.format(keep_str)) + continue + + offset_expand = 1.0 + if valid_set == 'totaltext': + offset_expand = 1.2 + + point_pair_list = [] + for y, x in yx_center_line: + offset = p_border[:, y, x].reshape(2, 2) * offset_expand + ori_yx = np.array([y, x], dtype=np.float32) + point_pair = (ori_yx + offset)[:, ::-1] * 4.0 / np.array( + [ratio_w, ratio_h]).reshape(-1, 2) + point_pair_list.append(point_pair) + + detected_poly = point_pair2poly(point_pair_list) + detected_poly = expand_poly_along_width( + detected_poly, shrink_ratio_of_width=0.2) + detected_poly[:, 0] = np.clip(detected_poly[:, 0], a_min=0, a_max=src_w) + detected_poly[:, 1] = np.clip(detected_poly[:, 1], a_min=0, a_max=src_h) + + keep_str_list.append(keep_str) + if valid_set == 'partvgg': + middle_point = len(detected_poly) // 2 + detected_poly = detected_poly[ + [0, middle_point - 1, middle_point, -1], :] + poly_list.append(detected_poly) + elif valid_set == 'totaltext': + poly_list.append(detected_poly) + else: + print('--> Not supported format.') + exit(-1) + return poly_list, keep_str_list + + +def generate_pivot_list_fast(p_score, + p_char_maps, + f_direction, + Lexicon_Table, + score_thresh=0.5): + """ + return center point and end point of TCL instance; filter with the char maps; + """ + p_score = p_score[0] + f_direction = f_direction.transpose(1, 2, 0) + p_tcl_map = (p_score > score_thresh) * 1.0 + skeleton_map = thin(p_tcl_map.astype(np.uint8)) + instance_count, instance_label_map = cv2.connectedComponents( + skeleton_map.astype(np.uint8), connectivity=8) + + # get TCL Instance + all_pos_yxs = [] + if instance_count > 0: + for instance_id in range(1, instance_count): + pos_list = [] + ys, xs = 
np.where(instance_label_map == instance_id) + pos_list = list(zip(ys, xs)) + + if len(pos_list) < 3: + continue + + pos_list_sorted = sort_and_expand_with_direction_v2( + pos_list, f_direction, p_tcl_map) + all_pos_yxs.append(pos_list_sorted) + + p_char_maps = p_char_maps.transpose([1, 2, 0]) + decoded_str, keep_yxs_list = ctc_decoder_for_image( + all_pos_yxs, logits_map=p_char_maps, Lexicon_Table=Lexicon_Table) + return keep_yxs_list, decoded_str + + +def extract_main_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + pos_list = np.array(pos_list) + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] + point_direction = point_direction[:, ::-1] # x, y -> y, x + average_direction = np.mean(point_direction, axis=0, keepdims=True) + average_direction = average_direction / ( + np.linalg.norm(average_direction) + 1e-6) + return average_direction + + +def sort_by_direction_with_image_id_deprecated(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[id, y, x], [id, y, x], [id, y, x] ...] + """ + pos_list_full = np.array(pos_list).reshape(-1, 3) + pos_list = pos_list_full[:, 1:] + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list_full[np.argsort(pos_proj_leng)].tolist() + return sorted_list + + +def sort_by_direction_with_image_id(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + + def sort_part_with_direction(pos_list_full, point_direction): + pos_list_full = np.array(pos_list_full).reshape(-1, 3) + pos_list = pos_list_full[:, 1:] + point_direction = np.array(point_direction).reshape(-1, 2) + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list_full[np.argsort(pos_proj_leng)].tolist() + sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist() + return sorted_list, sorted_direction + + pos_list = np.array(pos_list).reshape(-1, 3) + point_direction = f_direction[pos_list[:, 1], pos_list[:, 2]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + sorted_point, sorted_direction = sort_part_with_direction(pos_list, + point_direction) + + point_num = len(sorted_point) + if point_num >= 16: + middle_num = point_num // 2 + first_part_point = sorted_point[:middle_num] + first_point_direction = sorted_direction[:middle_num] + sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction( + first_part_point, first_point_direction) + + last_part_point = sorted_point[middle_num:] + last_point_direction = sorted_direction[middle_num:] + sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction( + last_part_point, last_point_direction) + sorted_point = sorted_fist_part_point + sorted_last_part_point + sorted_direction = sorted_fist_part_direction + sorted_last_part_direction + + return sorted_point diff --git a/modules/pytorchocr/utils/e2e_utils/extract_textpoint_slow.py b/modules/pytorchocr/utils/e2e_utils/extract_textpoint_slow.py new file mode 100644 index 0000000..ace46fb --- /dev/null +++ b/modules/pytorchocr/utils/e2e_utils/extract_textpoint_slow.py @@ -0,0 +1,592 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains various CTC decoders.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import cv2 +import math + +import numpy as np +from itertools import groupby +from skimage.morphology._skeletonize import thin + + +def get_dict(character_dict_path): + character_str = "" + with open(character_dict_path, "rb") as fin: + lines = fin.readlines() + for line in lines: + line = line.decode('utf-8').strip("\n").strip("\r\n") + character_str += line + dict_character = list(character_str) + return dict_character + + +def point_pair2poly(point_pair_list): + """ + Transfer vertical point_pairs into poly point in clockwise. + """ + pair_length_list = [] + for point_pair in point_pair_list: + pair_length = np.linalg.norm(point_pair[0] - point_pair[1]) + pair_length_list.append(pair_length) + pair_length_list = np.array(pair_length_list) + pair_info = (pair_length_list.max(), pair_length_list.min(), + pair_length_list.mean()) + + point_num = len(point_pair_list) * 2 + point_list = [0] * point_num + for idx, point_pair in enumerate(point_pair_list): + point_list[idx] = point_pair[0] + point_list[point_num - 1 - idx] = point_pair[1] + return np.array(point_list).reshape(-1, 2), pair_info + + +def shrink_quad_along_width(quad, begin_width_ratio=0., end_width_ratio=1.): + """ + Generate shrink_quad_along_width. + """ + ratio_pair = np.array( + [[begin_width_ratio], [end_width_ratio]], dtype=np.float32) + p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair + p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair + return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]]) + + +def expand_poly_along_width(poly, shrink_ratio_of_width=0.3): + """ + expand poly along width. + """ + point_num = poly.shape[0] + left_quad = np.array( + [poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32) + left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \ + (np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6) + left_quad_expand = shrink_quad_along_width(left_quad, left_ratio, 1.0) + right_quad = np.array( + [ + poly[point_num // 2 - 2], poly[point_num // 2 - 1], + poly[point_num // 2], poly[point_num // 2 + 1] + ], + dtype=np.float32) + right_ratio = 1.0 + \ + shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \ + (np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6) + right_quad_expand = shrink_quad_along_width(right_quad, 0.0, right_ratio) + poly[0] = left_quad_expand[0] + poly[-1] = left_quad_expand[-1] + poly[point_num // 2 - 1] = right_quad_expand[1] + poly[point_num // 2] = right_quad_expand[2] + return poly + + +def softmax(logits): + """ + logits: N x d + """ + max_value = np.max(logits, axis=1, keepdims=True) + exp = np.exp(logits - max_value) + exp_sum = np.sum(exp, axis=1, keepdims=True) + dist = exp / exp_sum + return dist + + +def get_keep_pos_idxs(labels, remove_blank=None): + """ + Remove duplicate and get pos idxs of keep items. 
+ The value of keep_blank should be [None, 95]. + """ + duplicate_len_list = [] + keep_pos_idx_list = [] + keep_char_idx_list = [] + for k, v_ in groupby(labels): + current_len = len(list(v_)) + if k != remove_blank: + current_idx = int(sum(duplicate_len_list) + current_len // 2) + keep_pos_idx_list.append(current_idx) + keep_char_idx_list.append(k) + duplicate_len_list.append(current_len) + return keep_char_idx_list, keep_pos_idx_list + + +def remove_blank(labels, blank=0): + new_labels = [x for x in labels if x != blank] + return new_labels + + +def insert_blank(labels, blank=0): + new_labels = [blank] + for l in labels: + new_labels += [l, blank] + return new_labels + + +def ctc_greedy_decoder(probs_seq, blank=95, keep_blank_in_idxs=True): + """ + CTC greedy (best path) decoder. + """ + raw_str = np.argmax(np.array(probs_seq), axis=1) + remove_blank_in_pos = None if keep_blank_in_idxs else blank + dedup_str, keep_idx_list = get_keep_pos_idxs( + raw_str, remove_blank=remove_blank_in_pos) + dst_str = remove_blank(dedup_str, blank=blank) + return dst_str, keep_idx_list + + +def instance_ctc_greedy_decoder(gather_info, + logits_map, + keep_blank_in_idxs=True): + """ + gather_info: [[x, y], [x, y] ...] + logits_map: H x W X (n_chars + 1) + """ + _, _, C = logits_map.shape + ys, xs = zip(*gather_info) + logits_seq = logits_map[list(ys), list(xs)] # n x 96 + probs_seq = softmax(logits_seq) + dst_str, keep_idx_list = ctc_greedy_decoder( + probs_seq, blank=C - 1, keep_blank_in_idxs=keep_blank_in_idxs) + keep_gather_list = [gather_info[idx] for idx in keep_idx_list] + return dst_str, keep_gather_list + + +def ctc_decoder_for_image(gather_info_list, logits_map, + keep_blank_in_idxs=True): + """ + CTC decoder using multiple processes. + """ + decoder_results = [] + for gather_info in gather_info_list: + res = instance_ctc_greedy_decoder( + gather_info, logits_map, keep_blank_in_idxs=keep_blank_in_idxs) + decoder_results.append(res) + return decoder_results + + +def sort_with_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] 
+ """ + + def sort_part_with_direction(pos_list, point_direction): + pos_list = np.array(pos_list).reshape(-1, 2) + point_direction = np.array(point_direction).reshape(-1, 2) + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list[np.argsort(pos_proj_leng)].tolist() + sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist() + return sorted_list, sorted_direction + + pos_list = np.array(pos_list).reshape(-1, 2) + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + sorted_point, sorted_direction = sort_part_with_direction(pos_list, + point_direction) + + point_num = len(sorted_point) + if point_num >= 16: + middle_num = point_num // 2 + first_part_point = sorted_point[:middle_num] + first_point_direction = sorted_direction[:middle_num] + sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction( + first_part_point, first_point_direction) + + last_part_point = sorted_point[middle_num:] + last_point_direction = sorted_direction[middle_num:] + sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction( + last_part_point, last_point_direction) + sorted_point = sorted_fist_part_point + sorted_last_part_point + sorted_direction = sorted_fist_part_direction + sorted_last_part_direction + + return sorted_point, np.array(sorted_direction) + + +def add_id(pos_list, image_id=0): + """ + Add id for gather feature, for inference. + """ + new_list = [] + for item in pos_list: + new_list.append((image_id, item[0], item[1])) + return new_list + + +def sort_and_expand_with_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + h, w, _ = f_direction.shape + sorted_list, point_direction = sort_with_direction(pos_list, f_direction) + + # expand along + point_num = len(sorted_list) + sub_direction_len = max(point_num // 3, 2) + left_direction = point_direction[:sub_direction_len, :] + right_dirction = point_direction[point_num - sub_direction_len:, :] + + left_average_direction = -np.mean(left_direction, axis=0, keepdims=True) + left_average_len = np.linalg.norm(left_average_direction) + left_start = np.array(sorted_list[0]) + left_step = left_average_direction / (left_average_len + 1e-6) + + right_average_direction = np.mean(right_dirction, axis=0, keepdims=True) + right_average_len = np.linalg.norm(right_average_direction) + right_step = right_average_direction / (right_average_len + 1e-6) + right_start = np.array(sorted_list[-1]) + + append_num = max( + int((left_average_len + right_average_len) / 2.0 * 0.15), 1) + left_list = [] + right_list = [] + for i in range(append_num): + ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ly < h and lx < w and (ly, lx) not in left_list: + left_list.append((ly, lx)) + ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype( + 'int32').tolist() + if ry < h and rx < w and (ry, rx) not in right_list: + right_list.append((ry, rx)) + + all_list = left_list[::-1] + sorted_list + right_list + return all_list + + +def sort_and_expand_with_direction_v2(pos_list, f_direction, binary_tcl_map): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] 
binary_tcl_map: h x w
+    """
+    h, w, _ = f_direction.shape
+    sorted_list, point_direction = sort_with_direction(pos_list, f_direction)
+
+    # expand the sorted skeleton at both ends along the average direction
+    point_num = len(sorted_list)
+    sub_direction_len = max(point_num // 3, 2)
+    left_direction = point_direction[:sub_direction_len, :]
+    right_direction = point_direction[point_num - sub_direction_len:, :]
+
+    left_average_direction = -np.mean(left_direction, axis=0, keepdims=True)
+    left_average_len = np.linalg.norm(left_average_direction)
+    left_start = np.array(sorted_list[0])
+    left_step = left_average_direction / (left_average_len + 1e-6)
+
+    right_average_direction = np.mean(right_direction, axis=0, keepdims=True)
+    right_average_len = np.linalg.norm(right_average_direction)
+    right_step = right_average_direction / (right_average_len + 1e-6)
+    right_start = np.array(sorted_list[-1])
+
+    append_num = max(
+        int((left_average_len + right_average_len) / 2.0 * 0.15), 1)
+    max_append_num = 2 * append_num
+
+    left_list = []
+    right_list = []
+    for i in range(max_append_num):
+        ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype(
+            'int32').tolist()
+        if ly < h and lx < w and (ly, lx) not in left_list:
+            if binary_tcl_map[ly, lx] > 0.5:
+                left_list.append((ly, lx))
+            else:
+                break
+
+    for i in range(max_append_num):
+        ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype(
+            'int32').tolist()
+        if ry < h and rx < w and (ry, rx) not in right_list:
+            if binary_tcl_map[ry, rx] > 0.5:
+                right_list.append((ry, rx))
+            else:
+                break
+
+    all_list = left_list[::-1] + sorted_list + right_list
+    return all_list
+
+
+def generate_pivot_list_curved(p_score,
+                               p_char_maps,
+                               f_direction,
+                               score_thresh=0.5,
+                               is_expand=True,
+                               is_backbone=False,
+                               image_id=0):
+    """
+    Return the center points and end points of each TCL instance, filtered
+    with the char maps.
+    """
+    p_score = p_score[0]
+    f_direction = f_direction.transpose(1, 2, 0)
+    p_tcl_map = (p_score > score_thresh) * 1.0
+    skeleton_map = thin(p_tcl_map)
+    instance_count, instance_label_map = cv2.connectedComponents(
+        skeleton_map.astype(np.uint8), connectivity=8)
+
+    # get TCL Instance
+    all_pos_yxs = []
+    center_pos_yxs = []
+    end_points_yxs = []
+    instance_center_pos_yxs = []
+    pred_strs = []
+    if instance_count > 0:
+        for instance_id in range(1, instance_count):
+            ys, xs = np.where(instance_label_map == instance_id)
+            pos_list = list(zip(ys, xs))
+
+            ### FIX-ME, eliminate outlier
+            if len(pos_list) < 3:
+                continue
+
+            if is_expand:
+                pos_list_sorted = sort_and_expand_with_direction_v2(
+                    pos_list, f_direction, p_tcl_map)
+            else:
+                pos_list_sorted, _ = sort_with_direction(pos_list, f_direction)
+            all_pos_yxs.append(pos_list_sorted)
+
+    # use the decoder to filter background points.
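+    # p_char_maps arrives as C x H x W and is transposed to H x W x C so the
+    # decoder can index it by (y, x). Greedy CTC decoding collapses each run
+    # of repeated labels to a single kept (y, x) position (blank runs are
+    # kept too, since keep_blank_in_idxs=True), thinning the skeleton points.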
+
+
+def generate_pivot_list_curved(p_score,
+                               p_char_maps,
+                               f_direction,
+                               score_thresh=0.5,
+                               is_expand=True,
+                               is_backbone=False,
+                               image_id=0):
+    """
+    Return the center points and end points of each TCL instance,
+    filtered with the character maps.
+    """
+    p_score = p_score[0]
+    f_direction = f_direction.transpose(1, 2, 0)
+    p_tcl_map = (p_score > score_thresh) * 1.0
+    skeleton_map = thin(p_tcl_map)
+    instance_count, instance_label_map = cv2.connectedComponents(
+        skeleton_map.astype(np.uint8), connectivity=8)
+
+    # get TCL instances
+    all_pos_yxs = []
+    center_pos_yxs = []
+    end_points_yxs = []
+    instance_center_pos_yxs = []
+    pred_strs = []
+    if instance_count > 0:
+        for instance_id in range(1, instance_count):
+            pos_list = []
+            ys, xs = np.where(instance_label_map == instance_id)
+            pos_list = list(zip(ys, xs))
+
+            ### FIX-ME, eliminate outlier
+            if len(pos_list) < 3:
+                continue
+
+            if is_expand:
+                pos_list_sorted = sort_and_expand_with_direction_v2(
+                    pos_list, f_direction, p_tcl_map)
+            else:
+                pos_list_sorted, _ = sort_with_direction(pos_list, f_direction)
+            all_pos_yxs.append(pos_list_sorted)
+
+    # use the decoder to filter out background points.
+    p_char_maps = p_char_maps.transpose([1, 2, 0])
+    decode_res = ctc_decoder_for_image(
+        all_pos_yxs, logits_map=p_char_maps, keep_blank_in_idxs=True)
+    for decoded_str, keep_yxs_list in decode_res:
+        if is_backbone:
+            keep_yxs_list_with_id = add_id(keep_yxs_list, image_id=image_id)
+            instance_center_pos_yxs.append(keep_yxs_list_with_id)
+            pred_strs.append(decoded_str)
+        else:
+            end_points_yxs.extend((keep_yxs_list[0], keep_yxs_list[-1]))
+            center_pos_yxs.extend(keep_yxs_list)
+
+    if is_backbone:
+        return pred_strs, instance_center_pos_yxs
+    else:
+        return center_pos_yxs, end_points_yxs
+
+
+def generate_pivot_list_horizontal(p_score,
+                                   p_char_maps,
+                                   f_direction,
+                                   score_thresh=0.5,
+                                   is_backbone=False,
+                                   image_id=0):
+    """
+    Return the center points and end points of each TCL instance,
+    filtered with the character maps.
+    """
+    p_score = p_score[0]
+    f_direction = f_direction.transpose(1, 2, 0)
+    p_tcl_map_bi = (p_score > score_thresh) * 1.0
+    instance_count, instance_label_map = cv2.connectedComponents(
+        p_tcl_map_bi.astype(np.uint8), connectivity=8)
+
+    # get TCL instances
+    all_pos_yxs = []
+    center_pos_yxs = []
+    end_points_yxs = []
+    instance_center_pos_yxs = []
+
+    if instance_count > 0:
+        for instance_id in range(1, instance_count):
+            pos_list = []
+            ys, xs = np.where(instance_label_map == instance_id)
+            pos_list = list(zip(ys, xs))
+
+            ### FIX-ME, eliminate outlier
+            if len(pos_list) < 5:
+                continue
+
+            # add rule here
+            main_direction = extract_main_direction(pos_list,
+                                                    f_direction)  # y, x
+            reference_direction = np.array([0, 1]).reshape([-1, 2])  # y, x
+            is_h_angle = abs(np.sum(
+                main_direction * reference_direction)) < math.cos(math.pi / 180 *
+                                                                  70)
+
+            point_yxs = np.array(pos_list)
+            max_y, max_x = np.max(point_yxs, axis=0)
+            min_y, min_x = np.min(point_yxs, axis=0)
+            is_h_len = (max_y - min_y) < 1.5 * (max_x - min_x)
+
+            pos_list_final = []
+            if is_h_len:
+                xs = np.unique(xs)
+                for x in xs:
+                    ys = instance_label_map[:, x].copy().reshape((-1, ))
+                    y = int(np.where(ys == instance_id)[0].mean())
+                    pos_list_final.append((y, x))
+            else:
+                ys = np.unique(ys)
+                for y in ys:
+                    xs = instance_label_map[y, :].copy().reshape((-1, ))
+                    x = int(np.where(xs == instance_id)[0].mean())
+                    pos_list_final.append((y, x))
+
+            pos_list_sorted, _ = sort_with_direction(pos_list_final,
+                                                     f_direction)
+            all_pos_yxs.append(pos_list_sorted)
+
+    # use the decoder to filter out background points.
+    p_char_maps = p_char_maps.transpose([1, 2, 0])
+    decode_res = ctc_decoder_for_image(
+        all_pos_yxs, logits_map=p_char_maps, keep_blank_in_idxs=True)
+    for decoded_str, keep_yxs_list in decode_res:
+        if is_backbone:
+            keep_yxs_list_with_id = add_id(keep_yxs_list, image_id=image_id)
+            instance_center_pos_yxs.append(keep_yxs_list_with_id)
+        else:
+            end_points_yxs.extend((keep_yxs_list[0], keep_yxs_list[-1]))
+            center_pos_yxs.extend(keep_yxs_list)
+
+    if is_backbone:
+        return instance_center_pos_yxs
+    else:
+        return center_pos_yxs, end_points_yxs
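The horizontal variant above picks its scanning axis with the `is_h_len` aspect-ratio test: if an instance's bounding box is less than 1.5 times taller than it is wide, it is scanned column by column, otherwise row by row. The test in isolation, on toy points (hypothetical values):

    import numpy as np

    point_yxs = np.array([(4, 2), (4, 3), (5, 4), (4, 5)])  # (y, x) pixels of one instance
    max_y, max_x = np.max(point_yxs, axis=0)
    min_y, min_x = np.min(point_yxs, axis=0)
    is_h_len = (max_y - min_y) < 1.5 * (max_x - min_x)      # 1 < 4.5: a horizontal instance
    print(is_h_len)  # True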
+ """ + if is_curved: + return generate_pivot_list_curved( + p_score, + p_char_maps, + f_direction, + score_thresh=score_thresh, + is_expand=True, + is_backbone=is_backbone, + image_id=image_id) + else: + return generate_pivot_list_horizontal( + p_score, + p_char_maps, + f_direction, + score_thresh=score_thresh, + is_backbone=is_backbone, + image_id=image_id) + + +# for refine module +def extract_main_direction(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + pos_list = np.array(pos_list) + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] + point_direction = point_direction[:, ::-1] # x, y -> y, x + average_direction = np.mean(point_direction, axis=0, keepdims=True) + average_direction = average_direction / ( + np.linalg.norm(average_direction) + 1e-6) + return average_direction + + +def sort_by_direction_with_image_id_deprecated(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[id, y, x], [id, y, x], [id, y, x] ...] + """ + pos_list_full = np.array(pos_list).reshape(-1, 3) + pos_list = pos_list_full[:, 1:] + point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list_full[np.argsort(pos_proj_leng)].tolist() + return sorted_list + + +def sort_by_direction_with_image_id(pos_list, f_direction): + """ + f_direction: h x w x 2 + pos_list: [[y, x], [y, x], [y, x] ...] + """ + + def sort_part_with_direction(pos_list_full, point_direction): + pos_list_full = np.array(pos_list_full).reshape(-1, 3) + pos_list = pos_list_full[:, 1:] + point_direction = np.array(point_direction).reshape(-1, 2) + average_direction = np.mean(point_direction, axis=0, keepdims=True) + pos_proj_leng = np.sum(pos_list * average_direction, axis=1) + sorted_list = pos_list_full[np.argsort(pos_proj_leng)].tolist() + sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist() + return sorted_list, sorted_direction + + pos_list = np.array(pos_list).reshape(-1, 3) + point_direction = f_direction[pos_list[:, 1], pos_list[:, 2]] # x, y + point_direction = point_direction[:, ::-1] # x, y -> y, x + sorted_point, sorted_direction = sort_part_with_direction(pos_list, + point_direction) + + point_num = len(sorted_point) + if point_num >= 16: + middle_num = point_num // 2 + first_part_point = sorted_point[:middle_num] + first_point_direction = sorted_direction[:middle_num] + sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction( + first_part_point, first_point_direction) + + last_part_point = sorted_point[middle_num:] + last_point_direction = sorted_direction[middle_num:] + sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction( + last_part_point, last_point_direction) + sorted_point = sorted_fist_part_point + sorted_last_part_point + sorted_direction = sorted_fist_part_direction + sorted_last_part_direction + + return sorted_point + + +def generate_pivot_list_tt_inference(p_score, + p_char_maps, + f_direction, + score_thresh=0.5, + is_backbone=False, + is_curved=True, + image_id=0): + """ + return center point and end point of TCL instance; filter with the char maps; + """ + p_score = p_score[0] + f_direction = f_direction.transpose(1, 2, 0) + p_tcl_map = (p_score > score_thresh) * 1.0 + skeleton_map = thin(p_tcl_map) + instance_count, instance_label_map = 
cv2.connectedComponents( + skeleton_map.astype(np.uint8), connectivity=8) + + # get TCL Instance + all_pos_yxs = [] + if instance_count > 0: + for instance_id in range(1, instance_count): + pos_list = [] + ys, xs = np.where(instance_label_map == instance_id) + pos_list = list(zip(ys, xs)) + ### FIX-ME, eliminate outlier + if len(pos_list) < 3: + continue + pos_list_sorted = sort_and_expand_with_direction_v2( + pos_list, f_direction, p_tcl_map) + pos_list_sorted_with_id = add_id(pos_list_sorted, image_id=image_id) + all_pos_yxs.append(pos_list_sorted_with_id) + return all_pos_yxs diff --git a/modules/pytorchocr/utils/e2e_utils/pgnet_pp_utils.py b/modules/pytorchocr/utils/e2e_utils/pgnet_pp_utils.py new file mode 100644 index 0000000..ce35a28 --- /dev/null +++ b/modules/pytorchocr/utils/e2e_utils/pgnet_pp_utils.py @@ -0,0 +1,150 @@ + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import torch +import os +import sys + +__dir__ = os.path.dirname(__file__) +sys.path.append(__dir__) +sys.path.append(os.path.join(__dir__, '..')) +from .extract_textpoint_slow import * +from .extract_textpoint_fast import generate_pivot_list_fast, restore_poly + + +class PGNet_PostProcess(object): + # two different post-process + def __init__(self, character_dict_path, valid_set, score_thresh, outs_dict, + shape_list): + self.Lexicon_Table = get_dict(character_dict_path) + self.valid_set = valid_set + self.score_thresh = score_thresh + self.outs_dict = outs_dict + self.shape_list = shape_list + + def pg_postprocess_fast(self): + p_score = self.outs_dict['f_score'] + p_border = self.outs_dict['f_border'] + p_char = self.outs_dict['f_char'] + p_direction = self.outs_dict['f_direction'] + if isinstance(p_score, torch.Tensor): + p_score = p_score[0].numpy() + p_border = p_border[0].numpy() + p_direction = p_direction[0].numpy() + p_char = p_char[0].numpy() + else: + p_score = p_score[0] + p_border = p_border[0] + p_direction = p_direction[0] + p_char = p_char[0] + + src_h, src_w, ratio_h, ratio_w = self.shape_list[0] + instance_yxs_list, seq_strs = generate_pivot_list_fast( + p_score, + p_char, + p_direction, + self.Lexicon_Table, + score_thresh=self.score_thresh) + poly_list, keep_str_list = restore_poly(instance_yxs_list, seq_strs, + p_border, ratio_w, ratio_h, + src_w, src_h, self.valid_set) + data = { + 'points': poly_list, + 'texts': keep_str_list, + } + return data + + def pg_postprocess_slow(self): + p_score = self.outs_dict['f_score'] + p_border = self.outs_dict['f_border'] + p_char = self.outs_dict['f_char'] + p_direction = self.outs_dict['f_direction'] + if isinstance(p_score, torch.Tensor): + p_score = p_score[0].numpy() + p_border = p_border[0].numpy() + p_direction = p_direction[0].numpy() + p_char = p_char[0].numpy() + else: + p_score = p_score[0] + p_border = p_border[0] + p_direction = p_direction[0] + p_char = p_char[0] + src_h, src_w, ratio_h, ratio_w = self.shape_list[0] + is_curved = self.valid_set == "totaltext" + char_seq_idx_set, instance_yxs_list = generate_pivot_list_slow( + p_score, + p_char, + p_direction, + score_thresh=self.score_thresh, + is_backbone=True, + is_curved=is_curved) + seq_strs = [] + for char_idx_set in char_seq_idx_set: + pr_str = ''.join([self.Lexicon_Table[pos] for pos in char_idx_set]) + seq_strs.append(pr_str) + poly_list = [] + keep_str_list = [] + all_point_list = [] + all_point_pair_list = [] + for yx_center_line, keep_str in zip(instance_yxs_list, seq_strs): + if len(yx_center_line) == 1: + 
yx_center_line.append(yx_center_line[-1])
+
+            offset_expand = 1.0
+            if self.valid_set == 'totaltext':
+                offset_expand = 1.2
+
+            point_pair_list = []
+            for batch_id, y, x in yx_center_line:
+                offset = p_border[:, y, x].reshape(2, 2)
+                if offset_expand != 1.0:
+                    offset_length = np.linalg.norm(
+                        offset, axis=1, keepdims=True)
+                    expand_length = np.clip(
+                        offset_length * (offset_expand - 1),
+                        a_min=0.5,
+                        a_max=3.0)
+                    offset_delta = offset / offset_length * expand_length
+                    offset = offset + offset_delta
+                ori_yx = np.array([y, x], dtype=np.float32)
+                point_pair = (ori_yx + offset)[:, ::-1] * 4.0 / np.array(
+                    [ratio_w, ratio_h]).reshape(-1, 2)
+                point_pair_list.append(point_pair)
+
+                all_point_list.append([
+                    int(round(x * 4.0 / ratio_w)),
+                    int(round(y * 4.0 / ratio_h))
+                ])
+                all_point_pair_list.append(
+                    point_pair.round().astype(np.int32).tolist())
+
+            detected_poly, pair_length_info = point_pair2poly(point_pair_list)
+            detected_poly = expand_poly_along_width(
+                detected_poly, shrink_ratio_of_width=0.2)
+            detected_poly[:, 0] = np.clip(
+                detected_poly[:, 0], a_min=0, a_max=src_w)
+            detected_poly[:, 1] = np.clip(
+                detected_poly[:, 1], a_min=0, a_max=src_h)
+
+            if len(keep_str) < 2:
+                continue
+
+            keep_str_list.append(keep_str)
+            detected_poly = np.round(detected_poly).astype('int32')
+            if self.valid_set == 'partvgg':
+                middle_point = len(detected_poly) // 2
+                detected_poly = detected_poly[
+                    [0, middle_point - 1, middle_point, -1], :]
+                poly_list.append(detected_poly)
+            elif self.valid_set == 'totaltext':
+                poly_list.append(detected_poly)
+            else:
+                print('--> Unsupported valid_set: only partvgg and totaltext are handled.')
+                exit(-1)
+        data = {
+            'points': poly_list,
+            'texts': keep_str_list,
+        }
+        return data
diff --git a/modules/pytorchocr/utils/e2e_utils/visual.py b/modules/pytorchocr/utils/e2e_utils/visual.py
new file mode 100644
index 0000000..e6e4fd0
--- /dev/null
+++ b/modules/pytorchocr/utils/e2e_utils/visual.py
@@ -0,0 +1,162 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+import cv2
+import time
+
+
+def resize_image(im, max_side_len=512):
+    """
+    Resize the image so that each side is a multiple of max_stride, as required by the network.
+    :param im: the input image
+    :param max_side_len: limit on the max image size, to avoid running out of GPU memory
+    :return: the resized image and the resize ratio
+    """
+    h, w, _ = im.shape
+
+    resize_w = w
+    resize_h = h
+
+    if resize_h > resize_w:
+        ratio = float(max_side_len) / resize_h
+    else:
+        ratio = float(max_side_len) / resize_w
+
+    resize_h = int(resize_h * ratio)
+    resize_w = int(resize_w * ratio)
+
+    max_stride = 128
+    resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
+    resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
+    im = cv2.resize(im, (int(resize_w), int(resize_h)))
+    ratio_h = resize_h / float(h)
+    ratio_w = resize_w / float(w)
+
+    return im, (ratio_h, ratio_w)
+
+
+def resize_image_min(im, max_side_len=512):
+    """
+    Resize the image so that its shorter side is scaled toward max_side_len,
+    then round both sides up to a multiple of the network stride.
+    """
+    h, w, _ = im.shape
+
+    resize_w = w
+    resize_h = h
+
+    if resize_h < resize_w:
+        ratio = float(max_side_len) / resize_h
+    else:
+        ratio = float(max_side_len) / resize_w
+
+    resize_h = int(resize_h * ratio)
+    resize_w = int(resize_w * ratio)
+
+    max_stride = 128
+    resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
+    resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
+    im = cv2.resize(im, (int(resize_w), int(resize_h)))
+    ratio_h = resize_h / float(h)
+    ratio_w = resize_w / float(w)
+    return im, (ratio_h, ratio_w)
+
+
+def resize_image_for_totaltext(im, max_side_len=512):
+    """
+    Enlarge the image by a factor of 1.25 unless that would push the height
+    past max_side_len, then round both sides up to a multiple of the stride.
+    """
+    h, w, _ = im.shape
+
+    resize_w = w
+    resize_h = h
+    ratio = 1.25
+    if h * ratio > max_side_len:
+        ratio = float(max_side_len) / resize_h
+
+    resize_h = int(resize_h * ratio)
+    resize_w = int(resize_w * ratio)
+
+    max_stride = 128
+    resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
+    resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
+    im = cv2.resize(im, (int(resize_w), int(resize_h)))
+    ratio_h = resize_h / float(h)
+    ratio_w = resize_w / float(w)
+    return im, (ratio_h, ratio_w)
+
+
+def point_pair2poly(point_pair_list):
+    """
+    Convert vertical point pairs into polygon points in clockwise order.
+    """
+    pair_length_list = []
+    for point_pair in point_pair_list:
+        pair_length = np.linalg.norm(point_pair[0] - point_pair[1])
+        pair_length_list.append(pair_length)
+    pair_length_list = np.array(pair_length_list)
+    pair_info = (pair_length_list.max(), pair_length_list.min(),
+                 pair_length_list.mean())
+
+    point_num = len(point_pair_list) * 2
+    point_list = [0] * point_num
+    for idx, point_pair in enumerate(point_pair_list):
+        point_list[idx] = point_pair[0]
+        point_list[point_num - 1 - idx] = point_pair[1]
+    return np.array(point_list).reshape(-1, 2), pair_info
+
+
+def shrink_quad_along_width(quad, begin_width_ratio=0., end_width_ratio=1.):
+    """
+    Shrink a quad along its width, keeping only the span between
+    begin_width_ratio and end_width_ratio.
+    """
+    ratio_pair = np.array(
+        [[begin_width_ratio], [end_width_ratio]], dtype=np.float32)
+    p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
+    p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
+    return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
+
+
+def expand_poly_along_width(poly, shrink_ratio_of_width=0.3):
+    """
+    Expand a poly along its width direction.
+ """ + point_num = poly.shape[0] + left_quad = np.array( + [poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32) + left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \ + (np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6) + left_quad_expand = shrink_quad_along_width(left_quad, left_ratio, 1.0) + right_quad = np.array( + [ + poly[point_num // 2 - 2], poly[point_num // 2 - 1], + poly[point_num // 2], poly[point_num // 2 + 1] + ], + dtype=np.float32) + right_ratio = 1.0 + \ + shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \ + (np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6) + right_quad_expand = shrink_quad_along_width(right_quad, 0.0, right_ratio) + poly[0] = left_quad_expand[0] + poly[-1] = left_quad_expand[-1] + poly[point_num // 2 - 1] = right_quad_expand[1] + poly[point_num // 2] = right_quad_expand[2] + return poly + + +def norm2(x, axis=None): + if axis: + return np.sqrt(np.sum(x**2, axis=axis)) + return np.sqrt(np.sum(x**2)) + + +def cos(p1, p2): + return (p1 * p2).sum() / (norm2(p1) * norm2(p2)) diff --git a/modules/pytorchocr/utils/en_dict.txt b/modules/pytorchocr/utils/en_dict.txt new file mode 100644 index 0000000..7677d31 --- /dev/null +++ b/modules/pytorchocr/utils/en_dict.txt @@ -0,0 +1,95 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +: +; +< += +> +? +@ +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +[ +\ +] +^ +_ +` +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +{ +| +} +~ +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ + diff --git a/modules/pytorchocr/utils/ic15_dict.txt b/modules/pytorchocr/utils/ic15_dict.txt new file mode 100644 index 0000000..4740603 --- /dev/null +++ b/modules/pytorchocr/utils/ic15_dict.txt @@ -0,0 +1,36 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z \ No newline at end of file diff --git a/modules/pytorchocr/utils/logging.py b/modules/pytorchocr/utils/logging.py new file mode 100644 index 0000000..68d0c2f --- /dev/null +++ b/modules/pytorchocr/utils/logging.py @@ -0,0 +1,52 @@ +import os +import sys +import logging +import functools +import torch.distributed as dist + +logger_initialized = {} + + +@functools.lru_cache() +def get_logger(name='root', log_file=None, log_level=logging.DEBUG): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified a FileHandler will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + Returns: + logging.Logger: The expected logger. 
+ """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + formatter = logging.Formatter( + '[%(asctime)s] %(name)s %(levelname)s: %(message)s', + datefmt="%Y/%m/%d %H:%M:%S") + + stream_handler = logging.StreamHandler(stream=sys.stdout) + stream_handler.setFormatter(formatter) + logger.addHandler(stream_handler) + if log_file is not None and dist.get_rank() == 0: + log_file_folder = os.path.split(log_file)[0] + os.makedirs(log_file_folder, exist_ok=True) + file_handler = logging.FileHandler(log_file, 'a') + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + # if dist.get_rank() == 0: + # logger.setLevel(log_level) + # else: + # logger.setLevel(logging.ERROR) + logger_initialized[name] = True + return logger \ No newline at end of file diff --git a/modules/pytorchocr/utils/poly_nms.py b/modules/pytorchocr/utils/poly_nms.py new file mode 100644 index 0000000..9dcb3d2 --- /dev/null +++ b/modules/pytorchocr/utils/poly_nms.py @@ -0,0 +1,146 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from shapely.geometry import Polygon + + +def points2polygon(points): + """Convert k points to 1 polygon. + + Args: + points (ndarray or list): A ndarray or a list of shape (2k) + that indicates k points. + + Returns: + polygon (Polygon): A polygon object. + """ + if isinstance(points, list): + points = np.array(points) + + assert isinstance(points, np.ndarray) + assert (points.size % 2 == 0) and (points.size >= 8) + + point_mat = points.reshape([-1, 2]) + return Polygon(point_mat) + + +def poly_intersection(poly_det, poly_gt, buffer=0.0001): + """Calculate the intersection area between two polygon. + + Args: + poly_det (Polygon): A polygon predicted by detector. + poly_gt (Polygon): A gt polygon. + + Returns: + intersection_area (float): The intersection area between two polygons. + """ + assert isinstance(poly_det, Polygon) + assert isinstance(poly_gt, Polygon) + + if buffer == 0: + poly_inter = poly_det & poly_gt + else: + poly_inter = poly_det.buffer(buffer) & poly_gt.buffer(buffer) + return poly_inter.area, poly_inter + + +def poly_union(poly_det, poly_gt): + """Calculate the union area between two polygon. + + Args: + poly_det (Polygon): A polygon predicted by detector. + poly_gt (Polygon): A gt polygon. + + Returns: + union_area (float): The union area between two polygons. 
+ """ + assert isinstance(poly_det, Polygon) + assert isinstance(poly_gt, Polygon) + + area_det = poly_det.area + area_gt = poly_gt.area + area_inters, _ = poly_intersection(poly_det, poly_gt) + return area_det + area_gt - area_inters + + +def valid_boundary(x, with_score=True): + num = len(x) + if num < 8: + return False + if num % 2 == 0 and (not with_score): + return True + if num % 2 == 1 and with_score: + return True + + return False + + +def boundary_iou(src, target): + """Calculate the IOU between two boundaries. + + Args: + src (list): Source boundary. + target (list): Target boundary. + + Returns: + iou (float): The iou between two boundaries. + """ + assert valid_boundary(src, False) + assert valid_boundary(target, False) + src_poly = points2polygon(src) + target_poly = points2polygon(target) + + return poly_iou(src_poly, target_poly) + + +def poly_iou(poly_det, poly_gt): + """Calculate the IOU between two polygons. + + Args: + poly_det (Polygon): A polygon predicted by detector. + poly_gt (Polygon): A gt polygon. + + Returns: + iou (float): The IOU between two polygons. + """ + assert isinstance(poly_det, Polygon) + assert isinstance(poly_gt, Polygon) + area_inters, _ = poly_intersection(poly_det, poly_gt) + area_union = poly_union(poly_det, poly_gt) + if area_union == 0: + return 0.0 + return area_inters / area_union + + +def poly_nms(polygons, threshold): + assert isinstance(polygons, list) + + polygons = np.array(sorted(polygons, key=lambda x: x[-1])) + + keep_poly = [] + index = [i for i in range(polygons.shape[0])] + + while len(index) > 0: + keep_poly.append(polygons[index[-1]].tolist()) + A = polygons[index[-1]][:-1] + index = np.delete(index, -1) + iou_list = np.zeros((len(index), )) + for i in range(len(index)): + B = polygons[index[i]][:-1] + iou_list[i] = boundary_iou(A, B) + remove_index = np.where(iou_list > threshold) + index = np.delete(index, remove_index) + + return keep_poly diff --git a/modules/pytorchocr/utils/ppocr_keys_v1.txt b/modules/pytorchocr/utils/ppocr_keys_v1.txt new file mode 100644 index 0000000..84b885d --- /dev/null +++ b/modules/pytorchocr/utils/ppocr_keys_v1.txt @@ -0,0 +1,6623 @@ +' +疗 +绚 +诚 +娇 +溜 +题 +贿 +者 +廖 +更 +纳 +加 +奉 +公 +一 +就 +汴 +计 +与 +路 +房 +原 +妇 +2 +0 +8 +- +7 +其 +> +: +] +, +, +骑 +刈 +全 +消 +昏 +傈 +安 +久 +钟 +嗅 +不 +影 +处 +驽 +蜿 +资 +关 +椤 +地 +瘸 +专 +问 +忖 +票 +嫉 +炎 +韵 +要 +月 +田 +节 +陂 +鄙 +捌 +备 +拳 +伺 +眼 +网 +盎 +大 +傍 +心 +东 +愉 +汇 +蹿 +科 +每 +业 +里 +航 +晏 +字 +平 +录 +先 +1 +3 +彤 +鲶 +产 +稍 +督 +腴 +有 +象 +岳 +注 +绍 +在 +泺 +文 +定 +核 +名 +水 +过 +理 +让 +偷 +率 +等 +这 +发 +” +为 +含 +肥 +酉 +相 +鄱 +七 +编 +猥 +锛 +日 +镀 +蒂 +掰 +倒 +辆 +栾 +栗 +综 +涩 +州 +雌 +滑 +馀 +了 +机 +块 +司 +宰 +甙 +兴 +矽 +抚 +保 +用 +沧 +秩 +如 +收 +息 +滥 +页 +疑 +埠 +! +! 
+姥 +异 +橹 +钇 +向 +下 +跄 +的 +椴 +沫 +国 +绥 +獠 +报 +开 +民 +蜇 +何 +分 +凇 +长 +讥 +藏 +掏 +施 +羽 +中 +讲 +派 +嘟 +人 +提 +浼 +间 +世 +而 +古 +多 +倪 +唇 +饯 +控 +庚 +首 +赛 +蜓 +味 +断 +制 +觉 +技 +替 +艰 +溢 +潮 +夕 +钺 +外 +摘 +枋 +动 +双 +单 +啮 +户 +枇 +确 +锦 +曜 +杜 +或 +能 +效 +霜 +盒 +然 +侗 +电 +晁 +放 +步 +鹃 +新 +杖 +蜂 +吒 +濂 +瞬 +评 +总 +隍 +对 +独 +合 +也 +是 +府 +青 +天 +诲 +墙 +组 +滴 +级 +邀 +帘 +示 +已 +时 +骸 +仄 +泅 +和 +遨 +店 +雇 +疫 +持 +巍 +踮 +境 +只 +亨 +目 +鉴 +崤 +闲 +体 +泄 +杂 +作 +般 +轰 +化 +解 +迂 +诿 +蛭 +璀 +腾 +告 +版 +服 +省 +师 +小 +规 +程 +线 +海 +办 +引 +二 +桧 +牌 +砺 +洄 +裴 +修 +图 +痫 +胡 +许 +犊 +事 +郛 +基 +柴 +呼 +食 +研 +奶 +律 +蛋 +因 +葆 +察 +戏 +褒 +戒 +再 +李 +骁 +工 +貂 +油 +鹅 +章 +啄 +休 +场 +给 +睡 +纷 +豆 +器 +捎 +说 +敏 +学 +会 +浒 +设 +诊 +格 +廓 +查 +来 +霓 +室 +溆 +¢ +诡 +寥 +焕 +舜 +柒 +狐 +回 +戟 +砾 +厄 +实 +翩 +尿 +五 +入 +径 +惭 +喹 +股 +宇 +篝 +| +; +美 +期 +云 +九 +祺 +扮 +靠 +锝 +槌 +系 +企 +酰 +阊 +暂 +蚕 +忻 +豁 +本 +羹 +执 +条 +钦 +H +獒 +限 +进 +季 +楦 +于 +芘 +玖 +铋 +茯 +未 +答 +粘 +括 +样 +精 +欠 +矢 +甥 +帷 +嵩 +扣 +令 +仔 +风 +皈 +行 +支 +部 +蓉 +刮 +站 +蜡 +救 +钊 +汗 +松 +嫌 +成 +可 +. +鹤 +院 +从 +交 +政 +怕 +活 +调 +球 +局 +验 +髌 +第 +韫 +谗 +串 +到 +圆 +年 +米 +/ +* +友 +忿 +检 +区 +看 +自 +敢 +刃 +个 +兹 +弄 +流 +留 +同 +没 +齿 +星 +聆 +轼 +湖 +什 +三 +建 +蛔 +儿 +椋 +汕 +震 +颧 +鲤 +跟 +力 +情 +璺 +铨 +陪 +务 +指 +族 +训 +滦 +鄣 +濮 +扒 +商 +箱 +十 +召 +慷 +辗 +所 +莞 +管 +护 +臭 +横 +硒 +嗓 +接 +侦 +六 +露 +党 +馋 +驾 +剖 +高 +侬 +妪 +幂 +猗 +绺 +骐 +央 +酐 +孝 +筝 +课 +徇 +缰 +门 +男 +西 +项 +句 +谙 +瞒 +秃 +篇 +教 +碲 +罚 +声 +呐 +景 +前 +富 +嘴 +鳌 +稀 +免 +朋 +啬 +睐 +去 +赈 +鱼 +住 +肩 +愕 +速 +旁 +波 +厅 +健 +茼 +厥 +鲟 +谅 +投 +攸 +炔 +数 +方 +击 +呋 +谈 +绩 +别 +愫 +僚 +躬 +鹧 +胪 +炳 +招 +喇 +膨 +泵 +蹦 +毛 +结 +5 +4 +谱 +识 +陕 +粽 +婚 +拟 +构 +且 +搜 +任 +潘 +比 +郢 +妨 +醪 +陀 +桔 +碘 +扎 +选 +哈 +骷 +楷 +亿 +明 +缆 +脯 +监 +睫 +逻 +婵 +共 +赴 +淝 +凡 +惦 +及 +达 +揖 +谩 +澹 +减 +焰 +蛹 +番 +祁 +柏 +员 +禄 +怡 +峤 +龙 +白 +叽 +生 +闯 +起 +细 +装 +谕 +竟 +聚 +钙 +上 +导 +渊 +按 +艾 +辘 +挡 +耒 +盹 +饪 +臀 +记 +邮 +蕙 +受 +各 +医 +搂 +普 +滇 +朗 +茸 +带 +翻 +酚 +( +光 +堤 +墟 +蔷 +万 +幻 +〓 +瑙 +辈 +昧 +盏 +亘 +蛀 +吉 +铰 +请 +子 +假 +闻 +税 +井 +诩 +哨 +嫂 +好 +面 +琐 +校 +馊 +鬣 +缂 +营 +访 +炖 +占 +农 +缀 +否 +经 +钚 +棵 +趟 +张 +亟 +吏 +茶 +谨 +捻 +论 +迸 +堂 +玉 +信 +吧 +瞠 +乡 +姬 +寺 +咬 +溏 +苄 +皿 +意 +赉 +宝 +尔 +钰 +艺 +特 +唳 +踉 +都 +荣 +倚 +登 +荐 +丧 +奇 +涵 +批 +炭 +近 +符 +傩 +感 +道 +着 +菊 +虹 +仲 +众 +懈 +濯 +颞 +眺 +南 +释 +北 +缝 +标 +既 +茗 +整 +撼 +迤 +贲 +挎 +耱 +拒 +某 +妍 +卫 +哇 +英 +矶 +藩 +治 +他 +元 +领 +膜 +遮 +穗 +蛾 +飞 +荒 +棺 +劫 +么 +市 +火 +温 +拈 +棚 +洼 +转 +果 +奕 +卸 +迪 +伸 +泳 +斗 +邡 +侄 +涨 +屯 +萋 +胭 +氡 +崮 +枞 +惧 +冒 +彩 +斜 +手 +豚 +随 +旭 +淑 +妞 +形 +菌 +吲 +沱 +争 +驯 +歹 +挟 +兆 +柱 +传 +至 +包 +内 +响 +临 +红 +功 +弩 +衡 +寂 +禁 +老 +棍 +耆 +渍 +织 +害 +氵 +渑 +布 +载 +靥 +嗬 +虽 +苹 +咨 +娄 +库 +雉 +榜 +帜 +嘲 +套 +瑚 +亲 +簸 +欧 +边 +6 +腿 +旮 +抛 +吹 +瞳 +得 +镓 +梗 +厨 +继 +漾 +愣 +憨 +士 +策 +窑 +抑 +躯 +襟 +脏 +参 +贸 +言 +干 +绸 +鳄 +穷 +藜 +音 +折 +详 +) +举 +悍 +甸 +癌 +黎 +谴 +死 +罩 +迁 +寒 +驷 +袖 +媒 +蒋 +掘 +模 +纠 +恣 +观 +祖 +蛆 +碍 +位 +稿 +主 +澧 +跌 +筏 +京 +锏 +帝 +贴 +证 +糠 +才 +黄 +鲸 +略 +炯 +饱 +四 +出 +园 +犀 +牧 +容 +汉 +杆 +浈 +汰 +瑷 +造 +虫 +瘩 +怪 +驴 +济 +应 +花 +沣 +谔 +夙 +旅 +价 +矿 +以 +考 +s +u +呦 +晒 +巡 +茅 +准 +肟 +瓴 +詹 +仟 +褂 +译 +桌 +混 +宁 +怦 +郑 +抿 +些 +余 +鄂 +饴 +攒 +珑 +群 +阖 +岔 +琨 +藓 +预 +环 +洮 +岌 +宀 +杲 +瀵 +最 +常 +囡 +周 +踊 +女 +鼓 +袭 +喉 +简 +范 +薯 +遐 +疏 +粱 +黜 +禧 +法 +箔 +斤 +遥 +汝 +奥 +直 +贞 +撑 +置 +绱 +集 +她 +馅 +逗 +钧 +橱 +魉 +[ +恙 +躁 +唤 +9 +旺 +膘 +待 +脾 +惫 +购 +吗 +依 +盲 +度 +瘿 +蠖 +俾 +之 +镗 +拇 +鲵 +厝 +簧 +续 +款 +展 +啃 +表 +剔 +品 +钻 +腭 +损 +清 +锶 +统 +涌 +寸 +滨 +贪 +链 +吠 +冈 +伎 +迥 +咏 +吁 +览 +防 +迅 +失 +汾 +阔 +逵 +绀 +蔑 +列 +川 +凭 +努 +熨 +揪 +利 +俱 +绉 +抢 +鸨 +我 +即 +责 +膦 +易 +毓 +鹊 +刹 +玷 +岿 +空 +嘞 +绊 +排 +术 +估 +锷 +违 +们 +苟 +铜 +播 +肘 +件 +烫 +审 +鲂 +广 +像 +铌 +惰 +铟 +巳 +胍 +鲍 +康 +憧 +色 +恢 +想 +拷 +尤 +疳 +知 +S +Y +F +D +A +峄 +裕 +帮 +握 +搔 +氐 +氘 +难 +墒 +沮 +雨 +叁 +缥 +悴 +藐 +湫 +娟 +苑 +稠 +颛 +簇 +后 +阕 +闭 +蕤 +缚 +怎 +佞 +码 +嘤 +蔡 +痊 +舱 +螯 +帕 +赫 +昵 +升 +烬 +岫 +、 +疵 +蜻 +髁 +蕨 +隶 +烛 +械 +丑 +盂 +梁 +强 +鲛 +由 +拘 +揉 +劭 +龟 +撤 +钩 +呕 +孛 +费 +妻 +漂 +求 +阑 +崖 +秤 +甘 +通 +深 +补 +赃 +坎 +床 +啪 +承 +吼 +量 +暇 +钼 +烨 +阂 +擎 +脱 +逮 +称 +P +神 +属 +矗 +华 +届 +狍 +葑 +汹 +育 +患 +窒 +蛰 +佼 +静 +槎 +运 +鳗 +庆 +逝 +曼 +疱 +克 +代 +官 +此 +麸 +耧 +蚌 +晟 +例 +础 +榛 +副 +测 +唰 +缢 +迹 +灬 +霁 +身 +岁 +赭 
+扛 +又 +菡 +乜 +雾 +板 +读 +陷 +徉 +贯 +郁 +虑 +变 +钓 +菜 +圾 +现 +琢 +式 +乐 +维 +渔 +浜 +左 +吾 +脑 +钡 +警 +T +啵 +拴 +偌 +漱 +湿 +硕 +止 +骼 +魄 +积 +燥 +联 +踢 +玛 +则 +窿 +见 +振 +畿 +送 +班 +钽 +您 +赵 +刨 +印 +讨 +踝 +籍 +谡 +舌 +崧 +汽 +蔽 +沪 +酥 +绒 +怖 +财 +帖 +肱 +私 +莎 +勋 +羔 +霸 +励 +哼 +帐 +将 +帅 +渠 +纪 +婴 +娩 +岭 +厘 +滕 +吻 +伤 +坝 +冠 +戊 +隆 +瘁 +介 +涧 +物 +黍 +并 +姗 +奢 +蹑 +掣 +垸 +锴 +命 +箍 +捉 +病 +辖 +琰 +眭 +迩 +艘 +绌 +繁 +寅 +若 +毋 +思 +诉 +类 +诈 +燮 +轲 +酮 +狂 +重 +反 +职 +筱 +县 +委 +磕 +绣 +奖 +晋 +濉 +志 +徽 +肠 +呈 +獐 +坻 +口 +片 +碰 +几 +村 +柿 +劳 +料 +获 +亩 +惕 +晕 +厌 +号 +罢 +池 +正 +鏖 +煨 +家 +棕 +复 +尝 +懋 +蜥 +锅 +岛 +扰 +队 +坠 +瘾 +钬 +@ +卧 +疣 +镇 +譬 +冰 +彷 +频 +黯 +据 +垄 +采 +八 +缪 +瘫 +型 +熹 +砰 +楠 +襁 +箐 +但 +嘶 +绳 +啤 +拍 +盥 +穆 +傲 +洗 +盯 +塘 +怔 +筛 +丿 +台 +恒 +喂 +葛 +永 +¥ +烟 +酒 +桦 +书 +砂 +蚝 +缉 +态 +瀚 +袄 +圳 +轻 +蛛 +超 +榧 +遛 +姒 +奘 +铮 +右 +荽 +望 +偻 +卡 +丶 +氰 +附 +做 +革 +索 +戚 +坨 +桷 +唁 +垅 +榻 +岐 +偎 +坛 +莨 +山 +殊 +微 +骇 +陈 +爨 +推 +嗝 +驹 +澡 +藁 +呤 +卤 +嘻 +糅 +逛 +侵 +郓 +酌 +德 +摇 +※ +鬃 +被 +慨 +殡 +羸 +昌 +泡 +戛 +鞋 +河 +宪 +沿 +玲 +鲨 +翅 +哽 +源 +铅 +语 +照 +邯 +址 +荃 +佬 +顺 +鸳 +町 +霭 +睾 +瓢 +夸 +椁 +晓 +酿 +痈 +咔 +侏 +券 +噎 +湍 +签 +嚷 +离 +午 +尚 +社 +锤 +背 +孟 +使 +浪 +缦 +潍 +鞅 +军 +姹 +驶 +笑 +鳟 +鲁 +》 +孽 +钜 +绿 +洱 +礴 +焯 +椰 +颖 +囔 +乌 +孔 +巴 +互 +性 +椽 +哞 +聘 +昨 +早 +暮 +胶 +炀 +隧 +低 +彗 +昝 +铁 +呓 +氽 +藉 +喔 +癖 +瑗 +姨 +权 +胱 +韦 +堑 +蜜 +酋 +楝 +砝 +毁 +靓 +歙 +锲 +究 +屋 +喳 +骨 +辨 +碑 +武 +鸠 +宫 +辜 +烊 +适 +坡 +殃 +培 +佩 +供 +走 +蜈 +迟 +翼 +况 +姣 +凛 +浔 +吃 +飘 +债 +犟 +金 +促 +苛 +崇 +坂 +莳 +畔 +绂 +兵 +蠕 +斋 +根 +砍 +亢 +欢 +恬 +崔 +剁 +餐 +榫 +快 +扶 +‖ +濒 +缠 +鳜 +当 +彭 +驭 +浦 +篮 +昀 +锆 +秸 +钳 +弋 +娣 +瞑 +夷 +龛 +苫 +拱 +致 +% +嵊 +障 +隐 +弑 +初 +娓 +抉 +汩 +累 +蓖 +" +唬 +助 +苓 +昙 +押 +毙 +破 +城 +郧 +逢 +嚏 +獭 +瞻 +溱 +婿 +赊 +跨 +恼 +璧 +萃 +姻 +貉 +灵 +炉 +密 +氛 +陶 +砸 +谬 +衔 +点 +琛 +沛 +枳 +层 +岱 +诺 +脍 +榈 +埂 +征 +冷 +裁 +打 +蹴 +素 +瘘 +逞 +蛐 +聊 +激 +腱 +萘 +踵 +飒 +蓟 +吆 +取 +咙 +簋 +涓 +矩 +曝 +挺 +揣 +座 +你 +史 +舵 +焱 +尘 +苏 +笈 +脚 +溉 +榨 +诵 +樊 +邓 +焊 +义 +庶 +儋 +蟋 +蒲 +赦 +呷 +杞 +诠 +豪 +还 +试 +颓 +茉 +太 +除 +紫 +逃 +痴 +草 +充 +鳕 +珉 +祗 +墨 +渭 +烩 +蘸 +慕 +璇 +镶 +穴 +嵘 +恶 +骂 +险 +绋 +幕 +碉 +肺 +戳 +刘 +潞 +秣 +纾 +潜 +銮 +洛 +须 +罘 +销 +瘪 +汞 +兮 +屉 +r +林 +厕 +质 +探 +划 +狸 +殚 +善 +煊 +烹 +〒 +锈 +逯 +宸 +辍 +泱 +柚 +袍 +远 +蹋 +嶙 +绝 +峥 +娥 +缍 +雀 +徵 +认 +镱 +谷 += +贩 +勉 +撩 +鄯 +斐 +洋 +非 +祚 +泾 +诒 +饿 +撬 +威 +晷 +搭 +芍 +锥 +笺 +蓦 +候 +琊 +档 +礁 +沼 +卵 +荠 +忑 +朝 +凹 +瑞 +头 +仪 +弧 +孵 +畏 +铆 +突 +衲 +车 +浩 +气 +茂 +悖 +厢 +枕 +酝 +戴 +湾 +邹 +飚 +攘 +锂 +写 +宵 +翁 +岷 +无 +喜 +丈 +挑 +嗟 +绛 +殉 +议 +槽 +具 +醇 +淞 +笃 +郴 +阅 +饼 +底 +壕 +砚 +弈 +询 +缕 +庹 +翟 +零 +筷 +暨 +舟 +闺 +甯 +撞 +麂 +茌 +蔼 +很 +珲 +捕 +棠 +角 +阉 +媛 +娲 +诽 +剿 +尉 +爵 +睬 +韩 +诰 +匣 +危 +糍 +镯 +立 +浏 +阳 +少 +盆 +舔 +擘 +匪 +申 +尬 +铣 +旯 +抖 +赘 +瓯 +居 +ˇ +哮 +游 +锭 +茏 +歌 +坏 +甚 +秒 +舞 +沙 +仗 +劲 +潺 +阿 +燧 +郭 +嗖 +霏 +忠 +材 +奂 +耐 +跺 +砀 +输 +岖 +媳 +氟 +极 +摆 +灿 +今 +扔 +腻 +枝 +奎 +药 +熄 +吨 +话 +q +额 +慑 +嘌 +协 +喀 +壳 +埭 +视 +著 +於 +愧 +陲 +翌 +峁 +颅 +佛 +腹 +聋 +侯 +咎 +叟 +秀 +颇 +存 +较 +罪 +哄 +岗 +扫 +栏 +钾 +羌 +己 +璨 +枭 +霉 +煌 +涸 +衿 +键 +镝 +益 +岢 +奏 +连 +夯 +睿 +冥 +均 +糖 +狞 +蹊 +稻 +爸 +刿 +胥 +煜 +丽 +肿 +璃 +掸 +跚 +灾 +垂 +樾 +濑 +乎 +莲 +窄 +犹 +撮 +战 +馄 +软 +络 +显 +鸢 +胸 +宾 +妲 +恕 +埔 +蝌 +份 +遇 +巧 +瞟 +粒 +恰 +剥 +桡 +博 +讯 +凯 +堇 +阶 +滤 +卖 +斌 +骚 +彬 +兑 +磺 +樱 +舷 +两 +娱 +福 +仃 +差 +找 +桁 +÷ +净 +把 +阴 +污 +戬 +雷 +碓 +蕲 +楚 +罡 +焖 +抽 +妫 +咒 +仑 +闱 +尽 +邑 +菁 +爱 +贷 +沥 +鞑 +牡 +嗉 +崴 +骤 +塌 +嗦 +订 +拮 +滓 +捡 +锻 +次 +坪 +杩 +臃 +箬 +融 +珂 +鹗 +宗 +枚 +降 +鸬 +妯 +阄 +堰 +盐 +毅 +必 +杨 +崃 +俺 +甬 +状 +莘 +货 +耸 +菱 +腼 +铸 +唏 +痤 +孚 +澳 +懒 +溅 +翘 +疙 +杷 +淼 +缙 +骰 +喊 +悉 +砻 +坷 +艇 +赁 +界 +谤 +纣 +宴 +晃 +茹 +归 +饭 +梢 +铡 +街 +抄 +肼 +鬟 +苯 +颂 +撷 +戈 +炒 +咆 +茭 +瘙 +负 +仰 +客 +琉 +铢 +封 +卑 +珥 +椿 +镧 +窨 +鬲 +寿 +御 +袤 +铃 +萎 +砖 +餮 +脒 +裳 +肪 +孕 +嫣 +馗 +嵇 +恳 +氯 +江 +石 +褶 +冢 +祸 +阻 +狈 +羞 +银 +靳 +透 +咳 +叼 +敷 +芷 +啥 +它 +瓤 +兰 +痘 +懊 +逑 +肌 +往 +捺 +坊 +甩 +呻 +〃 +沦 +忘 +膻 +祟 +菅 +剧 +崆 +智 +坯 +臧 +霍 +墅 +攻 +眯 +倘 +拢 +骠 +铐 +庭 +岙 +瓠 +′ +缺 +泥 +迢 +捶 +? +? 
+郏 +喙 +掷 +沌 +纯 +秘 +种 +听 +绘 +固 +螨 +团 +香 +盗 +妒 +埚 +蓝 +拖 +旱 +荞 +铀 +血 +遏 +汲 +辰 +叩 +拽 +幅 +硬 +惶 +桀 +漠 +措 +泼 +唑 +齐 +肾 +念 +酱 +虚 +屁 +耶 +旗 +砦 +闵 +婉 +馆 +拭 +绅 +韧 +忏 +窝 +醋 +葺 +顾 +辞 +倜 +堆 +辋 +逆 +玟 +贱 +疾 +董 +惘 +倌 +锕 +淘 +嘀 +莽 +俭 +笏 +绑 +鲷 +杈 +择 +蟀 +粥 +嗯 +驰 +逾 +案 +谪 +褓 +胫 +哩 +昕 +颚 +鲢 +绠 +躺 +鹄 +崂 +儒 +俨 +丝 +尕 +泌 +啊 +萸 +彰 +幺 +吟 +骄 +苣 +弦 +脊 +瑰 +〈 +诛 +镁 +析 +闪 +剪 +侧 +哟 +框 +螃 +守 +嬗 +燕 +狭 +铈 +缮 +概 +迳 +痧 +鲲 +俯 +售 +笼 +痣 +扉 +挖 +满 +咋 +援 +邱 +扇 +歪 +便 +玑 +绦 +峡 +蛇 +叨 +〖 +泽 +胃 +斓 +喋 +怂 +坟 +猪 +该 +蚬 +炕 +弥 +赞 +棣 +晔 +娠 +挲 +狡 +创 +疖 +铕 +镭 +稷 +挫 +弭 +啾 +翔 +粉 +履 +苘 +哦 +楼 +秕 +铂 +土 +锣 +瘟 +挣 +栉 +习 +享 +桢 +袅 +磨 +桂 +谦 +延 +坚 +蔚 +噗 +署 +谟 +猬 +钎 +恐 +嬉 +雒 +倦 +衅 +亏 +璩 +睹 +刻 +殿 +王 +算 +雕 +麻 +丘 +柯 +骆 +丸 +塍 +谚 +添 +鲈 +垓 +桎 +蚯 +芥 +予 +飕 +镦 +谌 +窗 +醚 +菀 +亮 +搪 +莺 +蒿 +羁 +足 +J +真 +轶 +悬 +衷 +靛 +翊 +掩 +哒 +炅 +掐 +冼 +妮 +l +谐 +稚 +荆 +擒 +犯 +陵 +虏 +浓 +崽 +刍 +陌 +傻 +孜 +千 +靖 +演 +矜 +钕 +煽 +杰 +酗 +渗 +伞 +栋 +俗 +泫 +戍 +罕 +沾 +疽 +灏 +煦 +芬 +磴 +叱 +阱 +榉 +湃 +蜀 +叉 +醒 +彪 +租 +郡 +篷 +屎 +良 +垢 +隗 +弱 +陨 +峪 +砷 +掴 +颁 +胎 +雯 +绵 +贬 +沐 +撵 +隘 +篙 +暖 +曹 +陡 +栓 +填 +臼 +彦 +瓶 +琪 +潼 +哪 +鸡 +摩 +啦 +俟 +锋 +域 +耻 +蔫 +疯 +纹 +撇 +毒 +绶 +痛 +酯 +忍 +爪 +赳 +歆 +嘹 +辕 +烈 +册 +朴 +钱 +吮 +毯 +癜 +娃 +谀 +邵 +厮 +炽 +璞 +邃 +丐 +追 +词 +瓒 +忆 +轧 +芫 +谯 +喷 +弟 +半 +冕 +裙 +掖 +墉 +绮 +寝 +苔 +势 +顷 +褥 +切 +衮 +君 +佳 +嫒 +蚩 +霞 +佚 +洙 +逊 +镖 +暹 +唛 +& +殒 +顶 +碗 +獗 +轭 +铺 +蛊 +废 +恹 +汨 +崩 +珍 +那 +杵 +曲 +纺 +夏 +薰 +傀 +闳 +淬 +姘 +舀 +拧 +卷 +楂 +恍 +讪 +厩 +寮 +篪 +赓 +乘 +灭 +盅 +鞣 +沟 +慎 +挂 +饺 +鼾 +杳 +树 +缨 +丛 +絮 +娌 +臻 +嗳 +篡 +侩 +述 +衰 +矛 +圈 +蚜 +匕 +筹 +匿 +濞 +晨 +叶 +骋 +郝 +挚 +蚴 +滞 +增 +侍 +描 +瓣 +吖 +嫦 +蟒 +匾 +圣 +赌 +毡 +癞 +恺 +百 +曳 +需 +篓 +肮 +庖 +帏 +卿 +驿 +遗 +蹬 +鬓 +骡 +歉 +芎 +胳 +屐 +禽 +烦 +晌 +寄 +媾 +狄 +翡 +苒 +船 +廉 +终 +痞 +殇 +々 +畦 +饶 +改 +拆 +悻 +萄 +£ +瓿 +乃 +訾 +桅 +匮 +溧 +拥 +纱 +铍 +骗 +蕃 +龋 +缬 +父 +佐 +疚 +栎 +醍 +掳 +蓄 +x +惆 +颜 +鲆 +榆 +〔 +猎 +敌 +暴 +谥 +鲫 +贾 +罗 +玻 +缄 +扦 +芪 +癣 +落 +徒 +臾 +恿 +猩 +托 +邴 +肄 +牵 +春 +陛 +耀 +刊 +拓 +蓓 +邳 +堕 +寇 +枉 +淌 +啡 +湄 +兽 +酷 +萼 +碚 +濠 +萤 +夹 +旬 +戮 +梭 +琥 +椭 +昔 +勺 +蜊 +绐 +晚 +孺 +僵 +宣 +摄 +冽 +旨 +萌 +忙 +蚤 +眉 +噼 +蟑 +付 +契 +瓜 +悼 +颡 +壁 +曾 +窕 +颢 +澎 +仿 +俑 +浑 +嵌 +浣 +乍 +碌 +褪 +乱 +蔟 +隙 +玩 +剐 +葫 +箫 +纲 +围 +伐 +决 +伙 +漩 +瑟 +刑 +肓 +镳 +缓 +蹭 +氨 +皓 +典 +畲 +坍 +铑 +檐 +塑 +洞 +倬 +储 +胴 +淳 +戾 +吐 +灼 +惺 +妙 +毕 +珐 +缈 +虱 +盖 +羰 +鸿 +磅 +谓 +髅 +娴 +苴 +唷 +蚣 +霹 +抨 +贤 +唠 +犬 +誓 +逍 +庠 +逼 +麓 +籼 +釉 +呜 +碧 +秧 +氩 +摔 +霄 +穸 +纨 +辟 +妈 +映 +完 +牛 +缴 +嗷 +炊 +恩 +荔 +茆 +掉 +紊 +慌 +莓 +羟 +阙 +萁 +磐 +另 +蕹 +辱 +鳐 +湮 +吡 +吩 +唐 +睦 +垠 +舒 +圜 +冗 +瞿 +溺 +芾 +囱 +匠 +僳 +汐 +菩 +饬 +漓 +黑 +霰 +浸 +濡 +窥 +毂 +蒡 +兢 +驻 +鹉 +芮 +诙 +迫 +雳 +厂 +忐 +臆 +猴 +鸣 +蚪 +栈 +箕 +羡 +渐 +莆 +捍 +眈 +哓 +趴 +蹼 +埕 +嚣 +骛 +宏 +淄 +斑 +噜 +严 +瑛 +垃 +椎 +诱 +压 +庾 +绞 +焘 +廿 +抡 +迄 +棘 +夫 +纬 +锹 +眨 +瞌 +侠 +脐 +竞 +瀑 +孳 +骧 +遁 +姜 +颦 +荪 +滚 +萦 +伪 +逸 +粳 +爬 +锁 +矣 +役 +趣 +洒 +颔 +诏 +逐 +奸 +甭 +惠 +攀 +蹄 +泛 +尼 +拼 +阮 +鹰 +亚 +颈 +惑 +勒 +〉 +际 +肛 +爷 +刚 +钨 +丰 +养 +冶 +鲽 +辉 +蔻 +画 +覆 +皴 +妊 +麦 +返 +醉 +皂 +擀 +〗 +酶 +凑 +粹 +悟 +诀 +硖 +港 +卜 +z +杀 +涕 +± +舍 +铠 +抵 +弛 +段 +敝 +镐 +奠 +拂 +轴 +跛 +袱 +e +t +沉 +菇 +俎 +薪 +峦 +秭 +蟹 +历 +盟 +菠 +寡 +液 +肢 +喻 +染 +裱 +悱 +抱 +氙 +赤 +捅 +猛 +跑 +氮 +谣 +仁 +尺 +辊 +窍 +烙 +衍 +架 +擦 +倏 +璐 +瑁 +币 +楞 +胖 +夔 +趸 +邛 +惴 +饕 +虔 +蝎 +§ +哉 +贝 +宽 +辫 +炮 +扩 +饲 +籽 +魏 +菟 +锰 +伍 +猝 +末 +琳 +哚 +蛎 +邂 +呀 +姿 +鄞 +却 +歧 +仙 +恸 +椐 +森 +牒 +寤 +袒 +婆 +虢 +雅 +钉 +朵 +贼 +欲 +苞 +寰 +故 +龚 +坭 +嘘 +咫 +礼 +硷 +兀 +睢 +汶 +’ +铲 +烧 +绕 +诃 +浃 +钿 +哺 +柜 +讼 +颊 +璁 +腔 +洽 +咐 +脲 +簌 +筠 +镣 +玮 +鞠 +谁 +兼 +姆 +挥 +梯 +蝴 +谘 +漕 +刷 +躏 +宦 +弼 +b +垌 +劈 +麟 +莉 +揭 +笙 +渎 +仕 +嗤 +仓 +配 +怏 +抬 +错 +泯 +镊 +孰 +猿 +邪 +仍 +秋 +鼬 +壹 +歇 +吵 +炼 +< +尧 +射 +柬 +廷 +胧 +霾 +凳 +隋 +肚 +浮 +梦 +祥 +株 +堵 +退 +L +鹫 +跎 +凶 +毽 +荟 +炫 +栩 +玳 +甜 +沂 +鹿 +顽 +伯 +爹 +赔 +蛴 +徐 +匡 +欣 +狰 +缸 +雹 +蟆 +疤 +默 +沤 +啜 +痂 +衣 +禅 +w +i +h +辽 +葳 +黝 +钗 +停 +沽 +棒 +馨 +颌 +肉 +吴 +硫 +悯 +劾 +娈 +马 +啧 +吊 +悌 +镑 +峭 +帆 +瀣 +涉 +咸 +疸 +滋 +泣 +翦 +拙 +癸 +钥 +蜒 ++ +尾 +庄 +凝 +泉 +婢 +渴 +谊 +乞 +陆 +锉 +糊 +鸦 +淮 +I +B +N +晦 +弗 +乔 +庥 +葡 +尻 +席 +橡 +傣 +渣 +拿 +惩 +麋 +斛 +缃 +矮 +蛏 +岘 +鸽 +姐 +膏 +催 +奔 +镒 +喱 +蠡 +摧 +钯 +胤 +柠 +拐 +璋 +鸥 +卢 +荡 +倾 +^ +_ +珀 +逄 +萧 +塾 +掇 +贮 +笆 +聂 +圃 +冲 +嵬 +M +滔 +笕 +值 
+炙 +偶 +蜱 +搐 +梆 +汪 +蔬 +腑 +鸯 +蹇 +敞 +绯 +仨 +祯 +谆 +梧 +糗 +鑫 +啸 +豺 +囹 +猾 +巢 +柄 +瀛 +筑 +踌 +沭 +暗 +苁 +鱿 +蹉 +脂 +蘖 +牢 +热 +木 +吸 +溃 +宠 +序 +泞 +偿 +拜 +檩 +厚 +朐 +毗 +螳 +吞 +媚 +朽 +担 +蝗 +橘 +畴 +祈 +糟 +盱 +隼 +郜 +惜 +珠 +裨 +铵 +焙 +琚 +唯 +咚 +噪 +骊 +丫 +滢 +勤 +棉 +呸 +咣 +淀 +隔 +蕾 +窈 +饨 +挨 +煅 +短 +匙 +粕 +镜 +赣 +撕 +墩 +酬 +馁 +豌 +颐 +抗 +酣 +氓 +佑 +搁 +哭 +递 +耷 +涡 +桃 +贻 +碣 +截 +瘦 +昭 +镌 +蔓 +氚 +甲 +猕 +蕴 +蓬 +散 +拾 +纛 +狼 +猷 +铎 +埋 +旖 +矾 +讳 +囊 +糜 +迈 +粟 +蚂 +紧 +鲳 +瘢 +栽 +稼 +羊 +锄 +斟 +睁 +桥 +瓮 +蹙 +祉 +醺 +鼻 +昱 +剃 +跳 +篱 +跷 +蒜 +翎 +宅 +晖 +嗑 +壑 +峻 +癫 +屏 +狠 +陋 +袜 +途 +憎 +祀 +莹 +滟 +佶 +溥 +臣 +约 +盛 +峰 +磁 +慵 +婪 +拦 +莅 +朕 +鹦 +粲 +裤 +哎 +疡 +嫖 +琵 +窟 +堪 +谛 +嘉 +儡 +鳝 +斩 +郾 +驸 +酊 +妄 +胜 +贺 +徙 +傅 +噌 +钢 +栅 +庇 +恋 +匝 +巯 +邈 +尸 +锚 +粗 +佟 +蛟 +薹 +纵 +蚊 +郅 +绢 +锐 +苗 +俞 +篆 +淆 +膀 +鲜 +煎 +诶 +秽 +寻 +涮 +刺 +怀 +噶 +巨 +褰 +魅 +灶 +灌 +桉 +藕 +谜 +舸 +薄 +搀 +恽 +借 +牯 +痉 +渥 +愿 +亓 +耘 +杠 +柩 +锔 +蚶 +钣 +珈 +喘 +蹒 +幽 +赐 +稗 +晤 +莱 +泔 +扯 +肯 +菪 +裆 +腩 +豉 +疆 +骜 +腐 +倭 +珏 +唔 +粮 +亡 +润 +慰 +伽 +橄 +玄 +誉 +醐 +胆 +龊 +粼 +塬 +陇 +彼 +削 +嗣 +绾 +芽 +妗 +垭 +瘴 +爽 +薏 +寨 +龈 +泠 +弹 +赢 +漪 +猫 +嘧 +涂 +恤 +圭 +茧 +烽 +屑 +痕 +巾 +赖 +荸 +凰 +腮 +畈 +亵 +蹲 +偃 +苇 +澜 +艮 +换 +骺 +烘 +苕 +梓 +颉 +肇 +哗 +悄 +氤 +涠 +葬 +屠 +鹭 +植 +竺 +佯 +诣 +鲇 +瘀 +鲅 +邦 +移 +滁 +冯 +耕 +癔 +戌 +茬 +沁 +巩 +悠 +湘 +洪 +痹 +锟 +循 +谋 +腕 +鳃 +钠 +捞 +焉 +迎 +碱 +伫 +急 +榷 +奈 +邝 +卯 +辄 +皲 +卟 +醛 +畹 +忧 +稳 +雄 +昼 +缩 +阈 +睑 +扌 +耗 +曦 +涅 +捏 +瞧 +邕 +淖 +漉 +铝 +耦 +禹 +湛 +喽 +莼 +琅 +诸 +苎 +纂 +硅 +始 +嗨 +傥 +燃 +臂 +赅 +嘈 +呆 +贵 +屹 +壮 +肋 +亍 +蚀 +卅 +豹 +腆 +邬 +迭 +浊 +} +童 +螂 +捐 +圩 +勐 +触 +寞 +汊 +壤 +荫 +膺 +渌 +芳 +懿 +遴 +螈 +泰 +蓼 +蛤 +茜 +舅 +枫 +朔 +膝 +眙 +避 +梅 +判 +鹜 +璜 +牍 +缅 +垫 +藻 +黔 +侥 +惚 +懂 +踩 +腰 +腈 +札 +丞 +唾 +慈 +顿 +摹 +荻 +琬 +~ +斧 +沈 +滂 +胁 +胀 +幄 +莜 +Z +匀 +鄄 +掌 +绰 +茎 +焚 +赋 +萱 +谑 +汁 +铒 +瞎 +夺 +蜗 +野 +娆 +冀 +弯 +篁 +懵 +灞 +隽 +芡 +脘 +俐 +辩 +芯 +掺 +喏 +膈 +蝈 +觐 +悚 +踹 +蔗 +熠 +鼠 +呵 +抓 +橼 +峨 +畜 +缔 +禾 +崭 +弃 +熊 +摒 +凸 +拗 +穹 +蒙 +抒 +祛 +劝 +闫 +扳 +阵 +醌 +踪 +喵 +侣 +搬 +仅 +荧 +赎 +蝾 +琦 +买 +婧 +瞄 +寓 +皎 +冻 +赝 +箩 +莫 +瞰 +郊 +笫 +姝 +筒 +枪 +遣 +煸 +袋 +舆 +痱 +涛 +母 +〇 +启 +践 +耙 +绲 +盘 +遂 +昊 +搞 +槿 +诬 +纰 +泓 +惨 +檬 +亻 +越 +C +o +憩 +熵 +祷 +钒 +暧 +塔 +阗 +胰 +咄 +娶 +魔 +琶 +钞 +邻 +扬 +杉 +殴 +咽 +弓 +〆 +髻 +】 +吭 +揽 +霆 +拄 +殖 +脆 +彻 +岩 +芝 +勃 +辣 +剌 +钝 +嘎 +甄 +佘 +皖 +伦 +授 +徕 +憔 +挪 +皇 +庞 +稔 +芜 +踏 +溴 +兖 +卒 +擢 +饥 +鳞 +煲 +‰ +账 +颗 +叻 +斯 +捧 +鳍 +琮 +讹 +蛙 +纽 +谭 +酸 +兔 +莒 +睇 +伟 +觑 +羲 +嗜 +宜 +褐 +旎 +辛 +卦 +诘 +筋 +鎏 +溪 +挛 +熔 +阜 +晰 +鳅 +丢 +奚 +灸 +呱 +献 +陉 +黛 +鸪 +甾 +萨 +疮 +拯 +洲 +疹 +辑 +叙 +恻 +谒 +允 +柔 +烂 +氏 +逅 +漆 +拎 +惋 +扈 +湟 +纭 +啕 +掬 +擞 +哥 +忽 +涤 +鸵 +靡 +郗 +瓷 +扁 +廊 +怨 +雏 +钮 +敦 +E +懦 +憋 +汀 +拚 +啉 +腌 +岸 +f +痼 +瞅 +尊 +咀 +眩 +飙 +忌 +仝 +迦 +熬 +毫 +胯 +篑 +茄 +腺 +凄 +舛 +碴 +锵 +诧 +羯 +後 +漏 +汤 +宓 +仞 +蚁 +壶 +谰 +皑 +铄 +棰 +罔 +辅 +晶 +苦 +牟 +闽 +\ +烃 +饮 +聿 +丙 +蛳 +朱 +煤 +涔 +鳖 +犁 +罐 +荼 +砒 +淦 +妤 +黏 +戎 +孑 +婕 +瑾 +戢 +钵 +枣 +捋 +砥 +衩 +狙 +桠 +稣 +阎 +肃 +梏 +诫 +孪 +昶 +婊 +衫 +嗔 +侃 +塞 +蜃 +樵 +峒 +貌 +屿 +欺 +缫 +阐 +栖 +诟 +珞 +荭 +吝 +萍 +嗽 +恂 +啻 +蜴 +磬 +峋 +俸 +豫 +谎 +徊 +镍 +韬 +魇 +晴 +U +囟 +猜 +蛮 +坐 +囿 +伴 +亭 +肝 +佗 +蝠 +妃 +胞 +滩 +榴 +氖 +垩 +苋 +砣 +扪 +馏 +姓 +轩 +厉 +夥 +侈 +禀 +垒 +岑 +赏 +钛 +辐 +痔 +披 +纸 +碳 +“ +坞 +蠓 +挤 +荥 +沅 +悔 +铧 +帼 +蒌 +蝇 +a +p +y +n +g +哀 +浆 +瑶 +凿 +桶 +馈 +皮 +奴 +苜 +佤 +伶 +晗 +铱 +炬 +优 +弊 +氢 +恃 +甫 +攥 +端 +锌 +灰 +稹 +炝 +曙 +邋 +亥 +眶 +碾 +拉 +萝 +绔 +捷 +浍 +腋 +姑 +菖 +凌 +涞 +麽 +锢 +桨 +潢 +绎 +镰 +殆 +锑 +渝 +铬 +困 +绽 +觎 +匈 +糙 +暑 +裹 +鸟 +盔 +肽 +迷 +綦 +『 +亳 +佝 +俘 +钴 +觇 +骥 +仆 +疝 +跪 +婶 +郯 +瀹 +唉 +脖 +踞 +针 +晾 +忒 +扼 +瞩 +叛 +椒 +疟 +嗡 +邗 +肆 +跆 +玫 +忡 +捣 +咧 +唆 +艄 +蘑 +潦 +笛 +阚 +沸 +泻 +掊 +菽 +贫 +斥 +髂 +孢 +镂 +赂 +麝 +鸾 +屡 +衬 +苷 +恪 +叠 +希 +粤 +爻 +喝 +茫 +惬 +郸 +绻 +庸 +撅 +碟 +宄 +妹 +膛 +叮 +饵 +崛 +嗲 +椅 +冤 +搅 +咕 +敛 +尹 +垦 +闷 +蝉 +霎 +勰 +败 +蓑 +泸 +肤 +鹌 +幌 +焦 +浠 +鞍 +刁 +舰 +乙 +竿 +裔 +。 +茵 +函 +伊 +兄 +丨 +娜 +匍 +謇 +莪 +宥 +似 +蝽 +翳 +酪 +翠 +粑 +薇 +祢 +骏 +赠 +叫 +Q +噤 +噻 +竖 +芗 +莠 +潭 +俊 +羿 +耜 +O +郫 +趁 +嗪 +囚 +蹶 +芒 +洁 +笋 +鹑 +敲 +硝 +啶 +堡 +渲 +揩 +』 +携 +宿 +遒 +颍 +扭 +棱 +割 +萜 +蔸 +葵 +琴 +捂 +饰 +衙 +耿 +掠 +募 +岂 +窖 +涟 +蔺 +瘤 +柞 +瞪 +怜 +匹 +距 +楔 +炜 +哆 +秦 +缎 +幼 +茁 +绪 +痨 +恨 +楸 +娅 +瓦 +桩 +雪 +嬴 +伏 +榔 +妥 +铿 +拌 +眠 +雍 +缇 +‘ +卓 +搓 +哌 +觞 +噩 +屈 +哧 +髓 +咦 +巅 +娑 +侑 +淫 +膳 +祝 +勾 +姊 +莴 
+胄 +疃 +薛 +蜷 +胛 +巷 +芙 +芋 +熙 +闰 +勿 +窃 +狱 +剩 +钏 +幢 +陟 +铛 +慧 +靴 +耍 +k +浙 +浇 +飨 +惟 +绗 +祜 +澈 +啼 +咪 +磷 +摞 +诅 +郦 +抹 +跃 +壬 +吕 +肖 +琏 +颤 +尴 +剡 +抠 +凋 +赚 +泊 +津 +宕 +殷 +倔 +氲 +漫 +邺 +涎 +怠 +$ +垮 +荬 +遵 +俏 +叹 +噢 +饽 +蜘 +孙 +筵 +疼 +鞭 +羧 +牦 +箭 +潴 +c +眸 +祭 +髯 +啖 +坳 +愁 +芩 +驮 +倡 +巽 +穰 +沃 +胚 +怒 +凤 +槛 +剂 +趵 +嫁 +v +邢 +灯 +鄢 +桐 +睽 +檗 +锯 +槟 +婷 +嵋 +圻 +诗 +蕈 +颠 +遭 +痢 +芸 +怯 +馥 +竭 +锗 +徜 +恭 +遍 +籁 +剑 +嘱 +苡 +龄 +僧 +桑 +潸 +弘 +澶 +楹 +悲 +讫 +愤 +腥 +悸 +谍 +椹 +呢 +桓 +葭 +攫 +阀 +翰 +躲 +敖 +柑 +郎 +笨 +橇 +呃 +魁 +燎 +脓 +葩 +磋 +垛 +玺 +狮 +沓 +砜 +蕊 +锺 +罹 +蕉 +翱 +虐 +闾 +巫 +旦 +茱 +嬷 +枯 +鹏 +贡 +芹 +汛 +矫 +绁 +拣 +禺 +佃 +讣 +舫 +惯 +乳 +趋 +疲 +挽 +岚 +虾 +衾 +蠹 +蹂 +飓 +氦 +铖 +孩 +稞 +瑜 +壅 +掀 +勘 +妓 +畅 +髋 +W +庐 +牲 +蓿 +榕 +练 +垣 +唱 +邸 +菲 +昆 +婺 +穿 +绡 +麒 +蚱 +掂 +愚 +泷 +涪 +漳 +妩 +娉 +榄 +讷 +觅 +旧 +藤 +煮 +呛 +柳 +腓 +叭 +庵 +烷 +阡 +罂 +蜕 +擂 +猖 +咿 +媲 +脉 +【 +沏 +貅 +黠 +熏 +哲 +烁 +坦 +酵 +兜 +× +潇 +撒 +剽 +珩 +圹 +乾 +摸 +樟 +帽 +嗒 +襄 +魂 +轿 +憬 +锡 +〕 +喃 +皆 +咖 +隅 +脸 +残 +泮 +袂 +鹂 +珊 +囤 +捆 +咤 +误 +徨 +闹 +淙 +芊 +淋 +怆 +囗 +拨 +梳 +渤 +R +G +绨 +蚓 +婀 +幡 +狩 +麾 +谢 +唢 +裸 +旌 +伉 +纶 +裂 +驳 +砼 +咛 +澄 +樨 +蹈 +宙 +澍 +倍 +貔 +操 +勇 +蟠 +摈 +砧 +虬 +够 +缁 +悦 +藿 +撸 +艹 +摁 +淹 +豇 +虎 +榭 +ˉ +吱 +d +° +喧 +荀 +踱 +侮 +奋 +偕 +饷 +犍 +惮 +坑 +璎 +徘 +宛 +妆 +袈 +倩 +窦 +昂 +荏 +乖 +K +怅 +撰 +鳙 +牙 +袁 +酞 +X +痿 +琼 +闸 +雁 +趾 +荚 +虻 +涝 +《 +杏 +韭 +偈 +烤 +绫 +鞘 +卉 +症 +遢 +蓥 +诋 +杭 +荨 +匆 +竣 +簪 +辙 +敕 +虞 +丹 +缭 +咩 +黟 +m +淤 +瑕 +咂 +铉 +硼 +茨 +嶂 +痒 +畸 +敬 +涿 +粪 +窘 +熟 +叔 +嫔 +盾 +忱 +裘 +憾 +梵 +赡 +珙 +咯 +娘 +庙 +溯 +胺 +葱 +痪 +摊 +荷 +卞 +乒 +髦 +寐 +铭 +坩 +胗 +枷 +爆 +溟 +嚼 +羚 +砬 +轨 +惊 +挠 +罄 +竽 +菏 +氧 +浅 +楣 +盼 +枢 +炸 +阆 +杯 +谏 +噬 +淇 +渺 +俪 +秆 +墓 +泪 +跻 +砌 +痰 +垡 +渡 +耽 +釜 +讶 +鳎 +煞 +呗 +韶 +舶 +绷 +鹳 +缜 +旷 +铊 +皱 +龌 +檀 +霖 +奄 +槐 +艳 +蝶 +旋 +哝 +赶 +骞 +蚧 +腊 +盈 +丁 +` +蜚 +矸 +蝙 +睨 +嚓 +僻 +鬼 +醴 +夜 +彝 +磊 +笔 +拔 +栀 +糕 +厦 +邰 +纫 +逭 +纤 +眦 +膊 +馍 +躇 +烯 +蘼 +冬 +诤 +暄 +骶 +哑 +瘠 +」 +臊 +丕 +愈 +咱 +螺 +擅 +跋 +搏 +硪 +谄 +笠 +淡 +嘿 +骅 +谧 +鼎 +皋 +姚 +歼 +蠢 +驼 +耳 +胬 +挝 +涯 +狗 +蒽 +孓 +犷 +凉 +芦 +箴 +铤 +孤 +嘛 +坤 +V +茴 +朦 +挞 +尖 +橙 +诞 +搴 +碇 +洵 +浚 +帚 +蜍 +漯 +柘 +嚎 +讽 +芭 +荤 +咻 +祠 +秉 +跖 +埃 +吓 +糯 +眷 +馒 +惹 +娼 +鲑 +嫩 +讴 +轮 +瞥 +靶 +褚 +乏 +缤 +宋 +帧 +删 +驱 +碎 +扑 +俩 +俄 +偏 +涣 +竹 +噱 +皙 +佰 +渚 +唧 +斡 +# +镉 +刀 +崎 +筐 +佣 +夭 +贰 +肴 +峙 +哔 +艿 +匐 +牺 +镛 +缘 +仡 +嫡 +劣 +枸 +堀 +梨 +簿 +鸭 +蒸 +亦 +稽 +浴 +{ +衢 +束 +槲 +j +阁 +揍 +疥 +棋 +潋 +聪 +窜 +乓 +睛 +插 +冉 +阪 +苍 +搽 +「 +蟾 +螟 +幸 +仇 +樽 +撂 +慢 +跤 +幔 +俚 +淅 +覃 +觊 +溶 +妖 +帛 +侨 +曰 +妾 +泗 +· +: +瀘 +風 +Ë +( +) +∶ +紅 +紗 +瑭 +雲 +頭 +鶏 +財 +許 +• +¥ +樂 +焗 +麗 +— +; +滙 +東 +榮 +繪 +興 +… +門 +業 +π +楊 +國 +顧 +é +盤 +寳 +Λ +龍 +鳳 +島 +誌 +緣 +結 +銭 +萬 +勝 +祎 +璟 +優 +歡 +臨 +時 +購 += +★ +藍 +昇 +鐵 +觀 +勅 +農 +聲 +畫 +兿 +術 +發 +劉 +記 +專 +耑 +園 +書 +壴 +種 +Ο +● +褀 +號 +銀 +匯 +敟 +锘 +葉 +橪 +廣 +進 +蒄 +鑽 +阝 +祙 +貢 +鍋 +豊 +夬 +喆 +團 +閣 +開 +燁 +賓 +館 +酡 +沔 +順 ++ +硚 +劵 +饸 +陽 +車 +湓 +復 +萊 +氣 +軒 +華 +堃 +迮 +纟 +戶 +馬 +學 +裡 +電 +嶽 +獨 +マ +シ +サ +ジ +燘 +袪 +環 +❤ +臺 +灣 +専 +賣 +孖 +聖 +攝 +線 +▪ +α +傢 +俬 +夢 +達 +莊 +喬 +貝 +薩 +劍 +羅 +壓 +棛 +饦 +尃 +璈 +囍 +醫 +G +I +A +# +N +鷄 +髙 +嬰 +啓 +約 +隹 +潔 +賴 +藝 +~ +寶 +籣 +麺 +  +嶺 +√ +義 +網 +峩 +長 +∧ +魚 +機 +構 +② +鳯 +偉 +L +B +㙟 +畵 +鴿 +' +詩 +溝 +嚞 +屌 +藔 +佧 +玥 +蘭 +織 +1 +3 +9 +0 +7 +點 +砭 +鴨 +鋪 +銘 +廳 +弍 +‧ +創 +湯 +坶 +℃ +卩 +骝 +& +烜 +荘 +當 +潤 +扞 +係 +懷 +碶 +钅 +蚨 +讠 +☆ +叢 +爲 +埗 +涫 +塗 +→ +楽 +現 +鯨 +愛 +瑪 +鈺 +忄 +悶 +藥 +飾 +樓 +視 +孬 +ㆍ +燚 +苪 +師 +① +丼 +锽 +│ +韓 +標 +è +兒 +閏 +匋 +張 +漢 +Ü +髪 +會 +閑 +檔 +習 +裝 +の +峯 +菘 +輝 +И +雞 +釣 +億 +浐 +K +O +R +8 +H +E +P +T +W +D +S +C +M +F +姌 +饹 +» +晞 +廰 +ä +嵯 +鷹 +負 +飲 +絲 +冚 +楗 +澤 +綫 +區 +❋ +← +質 +靑 +揚 +③ +滬 +統 +産 +協 +﹑ +乸 +畐 +經 +運 +際 +洺 +岽 +為 +粵 +諾 +崋 +豐 +碁 +ɔ +V +2 +6 +齋 +誠 +訂 +´ +勑 +雙 +陳 +無 +í +泩 +媄 +夌 +刂 +i +c +t +o +r +a +嘢 +耄 +燴 +暃 +壽 +媽 +靈 +抻 +體 +唻 +É +冮 +甹 +鎮 +錦 +ʌ +蜛 +蠄 +尓 +駕 +戀 +飬 +逹 +倫 +貴 +極 +Я +Й +寬 +磚 +嶪 +郎 +職 +| +間 +n +d +剎 +伈 +課 +飛 +橋 +瘊 +№ +譜 +骓 +圗 +滘 +縣 +粿 +咅 +養 +濤 +彳 +® +% +Ⅱ +啰 +㴪 +見 +矞 +薬 +糁 +邨 +鲮 +顔 +罱 +З +選 +話 +贏 +氪 +俵 +競 +瑩 +繡 +枱 +β +綉 +á +獅 +爾 +™ +麵 +戋 +淩 +徳 +個 +劇 +場 +務 +簡 +寵 +h +實 +膠 +轱 +圖 +築 +嘣 +樹 +㸃 +營 +耵 +孫 +饃 +鄺 +飯 +麯 +遠 +輸 +坫 +孃 +乚 
+閃 +鏢 +㎡ +題 +廠 +關 +↑ +爺 +將 +軍 +連 +篦 +覌 +參 +箸 +- +窠 +棽 +寕 +夀 +爰 +歐 +呙 +閥 +頡 +熱 +雎 +垟 +裟 +凬 +勁 +帑 +馕 +夆 +疌 +枼 +馮 +貨 +蒤 +樸 +彧 +旸 +靜 +龢 +暢 +㐱 +鳥 +珺 +鏡 +灡 +爭 +堷 +廚 +Ó +騰 +診 +┅ +蘇 +褔 +凱 +頂 +豕 +亞 +帥 +嘬 +⊥ +仺 +桖 +複 +饣 +絡 +穂 +顏 +棟 +納 +▏ +濟 +親 +設 +計 +攵 +埌 +烺 +ò +頤 +燦 +蓮 +撻 +節 +講 +濱 +濃 +娽 +洳 +朿 +燈 +鈴 +護 +膚 +铔 +過 +補 +Z +U +5 +4 +坋 +闿 +䖝 +餘 +缐 +铞 +貿 +铪 +桼 +趙 +鍊 +[ +㐂 +垚 +菓 +揸 +捲 +鐘 +滏 +𣇉 +爍 +輪 +燜 +鴻 +鮮 +動 +鹞 +鷗 +丄 +慶 +鉌 +翥 +飮 +腸 +⇋ +漁 +覺 +來 +熘 +昴 +翏 +鲱 +圧 +鄉 +萭 +頔 +爐 +嫚 +г +貭 +類 +聯 +幛 +輕 +訓 +鑒 +夋 +锨 +芃 +珣 +䝉 +扙 +嵐 +銷 +處 +ㄱ +語 +誘 +苝 +歸 +儀 +燒 +楿 +內 +粢 +葒 +奧 +麥 +礻 +滿 +蠔 +穵 +瞭 +態 +鱬 +榞 +硂 +鄭 +黃 +煙 +祐 +奓 +逺 +* +瑄 +獲 +聞 +薦 +讀 +這 +樣 +決 +問 +啟 +們 +執 +説 +轉 +單 +隨 +唘 +帶 +倉 +庫 +還 +贈 +尙 +皺 +■ +餅 +產 +○ +∈ +報 +狀 +楓 +賠 +琯 +嗮 +禮 +` +傳 +> +≤ +嗞 +Φ +≥ +換 +咭 +∣ +↓ +曬 +ε +応 +寫 +″ +終 +様 +純 +費 +療 +聨 +凍 +壐 +郵 +ü +黒 +∫ +製 +塊 +調 +軽 +確 +撃 +級 +馴 +Ⅲ +涇 +繹 +數 +碼 +證 +狒 +処 +劑 +< +晧 +賀 +衆 +] +櫥 +兩 +陰 +絶 +對 +鯉 +憶 +◎ +p +e +Y +蕒 +煖 +頓 +測 +試 +鼽 +僑 +碩 +妝 +帯 +≈ +鐡 +舖 +權 +喫 +倆 +ˋ +該 +悅 +ā +俫 +. +f +s +b +m +k +g +u +j +貼 +淨 +濕 +針 +適 +備 +l +/ +給 +謢 +強 +觸 +衛 +與 +⊙ +$ +緯 +變 +⑴ +⑵ +⑶ +㎏ +殺 +∩ +幚 +─ +價 +▲ +離 +ú +ó +飄 +烏 +関 +閟 +﹝ +﹞ +邏 +輯 +鍵 +驗 +訣 +導 +歷 +屆 +層 +▼ +儱 +錄 +熳 +ē +艦 +吋 +錶 +辧 +飼 +顯 +④ +禦 +販 +気 +対 +枰 +閩 +紀 +幹 +瞓 +貊 +淚 +△ +眞 +墊 +Ω +獻 +褲 +縫 +緑 +亜 +鉅 +餠 +{ +} +◆ +蘆 +薈 +█ +◇ +溫 +彈 +晳 +粧 +犸 +穩 +訊 +崬 +凖 +熥 +П +舊 +條 +紋 +圍 +Ⅳ +筆 +尷 +難 +雜 +錯 +綁 +識 +頰 +鎖 +艶 +□ +殁 +殼 +⑧ +├ +▕ +鵬 +ǐ +ō +ǒ +糝 +綱 +▎ +μ +盜 +饅 +醬 +籤 +蓋 +釀 +鹽 +據 +à +ɡ +辦 +◥ +彐 +┌ +婦 +獸 +鲩 +伱 +ī +蒟 +蒻 +齊 +袆 +腦 +寧 +凈 +妳 +煥 +詢 +偽 +謹 +啫 +鯽 +騷 +鱸 +損 +傷 +鎻 +髮 +買 +冏 +儥 +両 +﹢ +∞ +載 +喰 +z +羙 +悵 +燙 +曉 +員 +組 +徹 +艷 +痠 +鋼 +鼙 +縮 +細 +嚒 +爯 +≠ +維 +" +鱻 +壇 +厍 +帰 +浥 +犇 +薡 +軎 +² +應 +醜 +刪 +緻 +鶴 +賜 +噁 +軌 +尨 +镔 +鷺 +槗 +彌 +葚 +濛 +請 +溇 +緹 +賢 +訪 +獴 +瑅 +資 +縤 +陣 +蕟 +栢 +韻 +祼 +恁 +伢 +謝 +劃 +涑 +總 +衖 +踺 +砋 +凉 +籃 +駿 +苼 +瘋 +昽 +紡 +驊 +腎 +﹗ +響 +杋 +剛 +嚴 +禪 +歓 +槍 +傘 +檸 +檫 +炣 +勢 +鏜 +鎢 +銑 +尐 +減 +奪 +惡 +θ +僮 +婭 +臘 +ū +ì +殻 +鉄 +∑ +蛲 +焼 +緖 +續 +紹 +懮 \ No newline at end of file diff --git a/modules/pytorchocr/utils/utility.py b/modules/pytorchocr/utils/utility.py new file mode 100644 index 0000000..70bb7b7 --- /dev/null +++ b/modules/pytorchocr/utils/utility.py @@ -0,0 +1,70 @@ +import os +import imghdr +import cv2 +import logging + +def get_image_file_list(img_file): + imgs_lists = [] + if img_file is None or not os.path.exists(img_file): + raise Exception("not found any img file in {}".format(img_file)) + + img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif', 'GIF'} + if os.path.isfile(img_file) and imghdr.what(img_file) in img_end: + imgs_lists.append(img_file) + elif os.path.isdir(img_file): + for single_file in os.listdir(img_file): + file_path = os.path.join(img_file, single_file) + if imghdr.what(file_path) in img_end: + imgs_lists.append(file_path) + if len(imgs_lists) == 0: + raise Exception("not found any img file in {}".format(img_file)) + return imgs_lists + + +def check_and_read_gif(img_path): + if os.path.basename(img_path)[-3:] in ['gif', 'GIF']: + gif = cv2.VideoCapture(img_path) + ret, frame = gif.read() + if not ret: + # logger = logging.getLogger('ppocr') + print("Cannot read {}. This gif image maybe corrupted.") + # logger.info("Cannot read {}. This gif image maybe corrupted.") + return None, False + if len(frame.shape) == 2 or frame.shape[-1] == 1: + frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) + imgvalue = frame[:, :, ::-1] + return imgvalue, True + return None, False + + +def check_and_read(img_path): + if os.path.basename(img_path)[-3:] in ['gif', 'GIF']: + gif = cv2.VideoCapture(img_path) + ret, frame = gif.read() + if not ret: + logger = logging.getLogger('ppocr') + logger.info("Cannot read {}. 
This gif image may be corrupted.".format(img_path))
+            return None, False, False
+        if len(frame.shape) == 2 or frame.shape[-1] == 1:
+            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
+        imgvalue = frame[:, :, ::-1]
+        return imgvalue, True, False
+    elif os.path.basename(img_path)[-3:] in ['pdf']:
+        import fitz
+        import numpy as np  # np is used below but not imported at module level
+        from PIL import Image
+        imgs = []
+        with fitz.open(img_path) as pdf:
+            for pg in range(0, pdf.pageCount):
+                page = pdf[pg]
+                mat = fitz.Matrix(2, 2)
+                pm = page.getPixmap(matrix=mat, alpha=False)
+
+                # if width or height > 2000 pixels, don't enlarge the image
+                if pm.width > 2000 or pm.height > 2000:
+                    pm = page.getPixmap(matrix=fitz.Matrix(1, 1), alpha=False)
+
+                img = Image.frombytes("RGB", [pm.width, pm.height], pm.samples)
+                img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+                imgs.append(img)
+        return imgs, False, True
+    return None, False, False
\ No newline at end of file
diff --git a/rough_layout.py b/rough_layout.py
index 938b4c9..7720395 100644
--- a/rough_layout.py
+++ b/rough_layout.py
@@ -241,7 +241,10 @@ def clean_layout_dets(layout_dets):
 def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmodel=None, inner_batch_size=4, batch_size=32,num_workers=8,timer=Timers(False)):
     dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format,
-                              mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt))
+
+                              mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt),
+                              det_pre_transform=ocrmodel.batch
+                              )
     dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=custom_collate_fn, num_workers=num_workers)
     featcher   = DataPrefetcher(dataloader,device='cuda')

From addccda1503fbe616bd68e108d1d077c967ee060 Mon Sep 17 00:00:00 2001
From: zhangtianning
Date: Wed, 31 Jul 2024 05:38:44 +0000
Subject: [PATCH 13/74] we pass the layout+mfd+det+rec test

---
 modules/batch_text_detector.py                | 296 +++++++++++++++++-
 .../pytorchocr/postprocess/db_postprocess.py  |  27 +-
 rough_layout.py                               | 174 ++++++----
 3 files changed, 409 insertions(+), 88 deletions(-)

diff --git a/modules/batch_text_detector.py b/modules/batch_text_detector.py
index 2e7088f..2387466 100644
--- a/modules/batch_text_detector.py
+++ b/modules/batch_text_detector.py
@@ -153,6 +153,12 @@ def clip_det_res(self, points, img_height, img_width):
             points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
         return points
 
+    def clip_det_res_batch(self, points, img_height, img_width):
+        # Clip every box's points to the image borders in one vectorized call
+        points[:, :, 0] = np.clip(points[:, :, 0], 0, img_width - 1)
+        points[:, :, 1] = np.clip(points[:, :, 1], 0, img_height - 1)
+        return points
+
     def filter_tag_det_res(self, dt_boxes, image_shape):
         img_height, img_width = image_shape[0:2]
         dt_boxes_new = []
@@ -167,6 +173,55 @@ def filter_tag_det_res(self, dt_boxes, image_shape):
         dt_boxes = np.array(dt_boxes_new)
         return dt_boxes
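The batched point ordering added below needs a per-box sort of an (N, 4, 2) array. The original draft called np.sort with order=['x'], which NumPy only supports for structured arrays, so the method is rewritten here with argsort plus take_along_axis. A standalone demonstration of that idiom (toy box, hypothetical values):

    import numpy as np

    # One box of four (x, y) points, unordered.
    pts = np.array([[[7.0, 5.0], [0.0, 1.0], [2.0, 3.0], [1.0, 0.0]]])  # shape (1, 4, 2)
    idx = np.argsort(pts[:, :, 0], axis=1)                  # per-box order of x coordinates
    x_sorted = np.take_along_axis(pts, idx[:, :, None], axis=1)
    print(x_sorted[0, :, 0])                                # [0. 1. 2. 7.]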
+ """ + # Sort points in each set by x-coordinates + xSorted = np.sort(pts_batch, axis=1, order=['x']) + + # Separate left-most and right-most points + leftMost = xSorted[:, :2, :] + rightMost = xSorted[:, 2:, :] + + # Sort left-most points by y-coordinates + leftMost = leftMost[np.argsort(leftMost[:, :, 1], axis=1)] + tl = leftMost[:, 0, :] + bl = leftMost[:, 1, :] + + # Sort right-most points by y-coordinates + rightMost = rightMost[np.argsort(rightMost[:, :, 1], axis=1)] + tr = rightMost[:, 0, :] + br = rightMost[:, 1, :] + + # Combine the points into the ordered rectangle + rect = np.stack((tl, tr, br, bl), axis=1) + return rect + + def filter_tag_det_res_new(self, dt_boxes, image_shape): + img_height, img_width = image_shape[0:2] + + # Order points clockwise and clip them + ordered_boxes = self.order_points_clockwise_batch(dt_boxes) + clipped_boxes = self.clip_det_res_batch(ordered_boxes,img_height, img_width) + + # Calculate widths and heights + widths = np.linalg.norm(clipped_boxes[:, 0] - clipped_boxes[:, 1], axis=1).astype(int) + heights = np.linalg.norm(clipped_boxes[:, 0] - clipped_boxes[:, 3], axis=1).astype(int) + + # Filter out boxes with width or height <= 3 + valid_indices = (widths > 3) & (heights > 3) + dt_boxes_new = clipped_boxes[valid_indices] + + return dt_boxes_new + def filter_tag_det_res_only_clip(self, dt_boxes, image_shape): img_height, img_width = image_shape[0:2] dt_boxes_new = [] @@ -312,6 +367,12 @@ def __init__(self, **kwargs): args = Namespace(**fast_config) super().__init__(args, **kwargs) + def batch_forward(self, _input_image_batch,shape_list_batch,ori_shape_list): + with torch.no_grad(): + dt_boxaes_batch = self.net(_input_image_batch) + pred_batch = self.discard_batch(dt_boxaes_batch) + dt_boxes_list=self.batch_postprocess(pred_batch, shape_list_batch,ori_shape_list) + return dt_boxes_list def batch_process(self, img_batch, ori_shape_list): _input_image_batch = [] shape_list_batch = [] @@ -321,11 +382,7 @@ def batch_process(self, img_batch, ori_shape_list): shape_list_batch.append(shape_list) _input_image_batch = torch.cat(_input_image_batch) shape_list_batch = np.stack(shape_list_batch) - with torch.no_grad(): - dt_boxaes_batch = self.net(_input_image_batch) - pred_batch = self.discard_batch(dt_boxaes_batch) - dt_boxes_list=self.batch_postprocess(pred_batch, shape_list_batch,ori_shape_list) - return dt_boxes_list + return self.batch_forward(self, _input_image_batch,shape_list_batch,ori_shape_list) def discard_batch(self, outputs): preds = {} @@ -348,16 +405,235 @@ def discard_batch(self, outputs): raise NotImplementedError return preds + def fast_postprocess(self,preds, shape_list ): + #return fast_torch_postprocess(self.postprocess_op,preds, shape_list) + if len(shape_list) == 1: + return fast_torch_postprocess(self.postprocess_op,preds, shape_list) + else: + return fast_torch_postprocess_multiprocess(self.postprocess_op,preds, shape_list) + + def batch_postprocess(self, preds_list, shape_list_list,ori_shape_list): dt_boxes_list=[] for preds, shape_list,ori_shape in zip(preds_list, shape_list_list,ori_shape_list): - post_result = self.postprocess_op(preds, shape_list) + post_result = self.fast_postprocess(preds, shape_list) dt_boxes = post_result[0]['points'] - if (self.det_algorithm == "SAST" and - self.det_sast_polygon) or (self.det_algorithm in ["PSE", "FCE"] and - self.postprocess_op.box_type == 'poly'): + if (self.det_algorithm == "SAST" and self.det_sast_polygon) or (self.det_algorithm in ["PSE", "FCE"] and self.postprocess_op.box_type == 
'poly'):
+                raise NotImplementedError
                 dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_shape)
             else:
                 dt_boxes = self.filter_tag_det_res(dt_boxes, ori_shape)
             dt_boxes_list.append(dt_boxes)
-        return dt_boxes_list
\ No newline at end of file
+        return dt_boxes_list
+
+def fast_torch_postprocess(self, outs_dict, shape_list):
+    """
+    Accelerated version of DBPostProcess.__call__, which reads:
+
+        def __call__(self, outs_dict, shape_list):
+            pred = outs_dict['maps']
+            pred = pred[:, 0, :, :]
+            segmentation = pred > self.thresh
+            if isinstance(segmentation, torch.Tensor):
+                segmentation = segmentation.cpu().numpy()
+
+            boxes_batch = []
+            for batch_index in range(pred.shape[0]):
+                src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
+                if self.dilation_kernel is not None:
+                    mask = cv2.dilate(np.array(segmentation[batch_index]).astype(np.uint8), self.dilation_kernel)
+                else:
+                    mask = segmentation[batch_index]
+                boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask, src_w, src_h)
+
+                boxes_batch.append({'points': boxes})
+            return boxes_batch
+    """
+    pred_batch = outs_dict['maps']
+    pred_batch = pred_batch[:, 0, :, :]
+    segmentation_batch = pred_batch > self.thresh
+    if isinstance(segmentation_batch, torch.Tensor):
+        segmentation_batch = segmentation_batch.cpu().numpy()
+    boxes_batch = []
+    for batch_index in range(pred_batch.shape[0]):
+        src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
+        boxes, scores = boxes_from_bitmap(self, pred_batch[batch_index], segmentation_batch[batch_index], src_w, src_h)
+        # boxes = boxes_from_bitmap_without_score(self, segmentation_batch[batch_index], src_w, src_h)
+        boxes_batch.append({'points': boxes})
+    return boxes_batch
+
+def get_contours_multiprocess(segmentation_mask):
+    """Binarize one segmentation mask and find its contours."""
+    outs = cv2.findContours((segmentation_mask * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+    if len(outs) == 3:
+        img, contours, _ = outs[0], outs[1], outs[2]
+    elif len(outs) == 2:
+        contours, _ = outs[0], outs[1]
+    return contours
+
+from concurrent.futures import ThreadPoolExecutor
+def fast_torch_postprocess_multiprocess(self, outs_dict, shape_list):
+    """
+    Same as fast_torch_postprocess, but runs cv2.findContours for the whole
+    batch in a thread pool before scoring and unclipping the boxes.
+    """
+    pred_batch = outs_dict['maps']
+    pred_batch = pred_batch[:, 0, :, :]
+    segmentation_batch = pred_batch > self.thresh
+    if isinstance(segmentation_batch, torch.Tensor):
+        segmentation_batch = segmentation_batch.cpu().numpy()
+
+    num_threads = min(8, len(segmentation_batch))
+
+    with ThreadPoolExecutor(max_workers=num_threads) as executor:
+        contours_batch = list(executor.map(get_contours_multiprocess, segmentation_batch))
+
+    boxes_batch = []
+    for batch_index in range(pred_batch.shape[0]):
+        src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
+        # boxes, scores = boxes_from_bitmap(self, pred_batch[batch_index], segmentation_batch[batch_index], src_w, src_h)
+        # boxes_from_contours returns (boxes, scores); keep only the boxes here.
+        boxes, _ = 
boxes_from_contours(self,pred_batch[batch_index],contours_batch[batch_index],src_w, src_h)
+            boxes_batch.append({'points': boxes})
+
+        # def boxes_from_bitmap_without_score_wrapper(args):
+        #     return boxes_from_bitmap_without_score(self, *args)
+        # with ThreadPoolExecutor(max_workers=num_threads) as executor:
+        #     src_h_list=[src_h for src_h, src_w, ratio_h, ratio_w in shape_list]
+        #     src_w_list=[src_w for src_h, src_w, ratio_h, ratio_w in shape_list]
+        #     boxes_batch = list(executor.map(boxes_from_bitmap_without_score_wrapper, zip(segmentation_batch,src_w_list,src_h_list)))
+        return boxes_batch
+
+def boxes_from_contours(self, pred, contours, dest_width, dest_height):
+    '''
+    pred: probability map with shape (H, W);
+    contours: contours already extracted from the binarized probability map
+    '''
+
+    height, width = pred.shape
+    num_contours = min(len(contours), self.max_candidates)
+
+    boxes = []
+    scores = []
+    for index in range(num_contours):
+        contour = contours[index]
+        points, sside = self.get_mini_boxes(contour)
+        if sside < self.min_size:continue
+        points = np.array(points)
+        score = box_score_fast(pred, points.reshape(-1, 2))
+        if self.box_thresh > score:continue
+        box = self.unclip(points).reshape(-1, 1, 2)
+        box, sside = self.get_mini_boxes(box)
+        if sside < self.min_size + 2:continue
+        box = np.array(box)
+        box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+        box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
+        boxes.append(box)
+        scores.append(score)
+    return np.array(boxes), scores
+
+def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
+    '''
+    _bitmap: single map with shape (H, W),
+        whose values are binarized as {0, 1}
+    '''
+
+    bitmap = _bitmap
+    height, width = bitmap.shape
+
+    outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+    if len(outs) == 3:
+        img, contours, _ = outs[0], outs[1], outs[2]
+    elif len(outs) == 2:
+        contours, _ = outs[0], outs[1]
+    num_contours = min(len(contours), self.max_candidates)
+
+    boxes = []
+    scores = []
+    for index in range(num_contours):
+        contour = contours[index]
+        points, sside = self.get_mini_boxes(contour)
+        if sside < self.min_size:continue
+        points = np.array(points)
+        score = box_score_fast(pred, points.reshape(-1, 2))
+        if self.box_thresh > score:continue
+        box = self.unclip(points).reshape(-1, 1, 2)
+        box, sside = self.get_mini_boxes(box)
+        if sside < self.min_size + 2:continue
+        box = np.array(box)
+        box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+        box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
+        boxes.append(box)
+        scores.append(score)
+    return np.array(boxes), scores
+
+def boxes_from_bitmap_without_score(self, _bitmap, dest_width, dest_height):
+    '''
+    _bitmap: single map with shape (H, W),
+        whose values are binarized as {0, 1}
+    '''
+
+    bitmap = _bitmap
+    height, width = bitmap.shape
+
+    outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+    if len(outs) == 3:
+        img, contours, _ = outs[0], outs[1], outs[2]
+    elif len(outs) == 2:
+        contours, _ = outs[0], outs[1]
+
+    num_contours = min(len(contours), self.max_candidates)
+
+    boxes = []
+    for index in range(num_contours):
+        contour = contours[index]
+        points, sside = self.get_mini_boxes(contour)
+        if sside < self.min_size:continue
+        points = np.array(points)
+        box = self.unclip(points).reshape(-1, 1, 2)
+        box, sside = self.get_mini_boxes(box)
+        if sside < self.min_size + 2:continue
+        box = np.array(box)
+        box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+        box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
+        boxes.append(box.astype(np.int16))
+    return np.array(boxes, dtype=np.int16)
+
+def obtain_score_mask(_box, h, w):
+    #h, w = bitmap.shape[:2]
+    box = _box.copy()
+    xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
+    xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
+    ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
+    ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
+
+    mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
+    box[:, 0] = box[:, 0] - xmin
+    box[:, 1] = box[:, 1] - ymin
+    cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
+    return xmin, xmax, ymin, ymax, mask
+
+def box_score_fast(bitmap, _box):
+    '''
+    box_score_fast: use the mean probability inside the box as the box score
+    '''
+    h, w = bitmap.shape[:2]
+    xmin, xmax, ymin, ymax, mask = obtain_score_mask(_box, h, w)
+    crop = bitmap[ymin:ymax + 1, xmin:xmax + 1]
+    mask = torch.BoolTensor(mask)
+    return crop[mask].mean().item()
+
\ No newline at end of file
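For intuition, a standalone sketch (illustrative sizes, not part of the patch) of what box_score_fast above computes: the quadrilateral is rasterized into a mask local to its clipped bounding box, and the box score is the mean of the probability map under that mask.

import numpy as np
import torch
import cv2

# A 100x100 probability map with a bright 20x30 region (values 0.9).
pred = torch.zeros(100, 100)
pred[20:40, 30:60] = 0.9

# Axis-aligned quad covering exactly that region, as a (4, 2) array of (x, y).
box = np.array([[30, 20], [59, 20], [59, 39], [30, 39]], dtype=np.float32)

# Same idea as obtain_score_mask: rasterize the quad into a uint8 mask
# local to its bounding box.
xmin, xmax = int(box[:, 0].min()), int(box[:, 0].max())
ymin, ymax = int(box[:, 1].min()), int(box[:, 1].max())
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
shifted = (box - np.array([xmin, ymin])).reshape(1, -1, 2).astype(np.int32)
cv2.fillPoly(mask, shifted, 1)

# Same idea as box_score_fast: mean of the map under the mask.
# Note the patch indexes with a torch bool mask, so bitmap is a torch tensor.
crop = pred[ymin:ymax + 1, xmin:xmax + 1]
score = crop[torch.from_numpy(mask.astype(bool))].mean().item()
print(round(score, 2))  # 0.9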
diff --git a/modules/pytorchocr/postprocess/db_postprocess.py b/modules/pytorchocr/postprocess/db_postprocess.py
index 5305109..cd17474 100644
--- a/modules/pytorchocr/postprocess/db_postprocess.py
+++ b/modules/pytorchocr/postprocess/db_postprocess.py
@@ -48,8 +48,7 @@ def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
         bitmap = _bitmap
         height, width = bitmap.shape
 
-        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
-                                cv2.CHAIN_APPROX_SIMPLE)
+        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
         if len(outs) == 3:
             img, contours, _ = outs[0], outs[1], outs[2]
         elif len(outs) == 2:
@@ -69,19 +68,15 @@ def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
                 score = self.box_score_fast(pred, points.reshape(-1, 2))
             else:
                 score = self.box_score_slow(pred, contour)
-            if self.box_thresh > score:
-                continue
+            if self.box_thresh > score:continue
 
             box = self.unclip(points).reshape(-1, 1, 2)
             box, sside = self.get_mini_boxes(box)
-            if sside < self.min_size + 2:
-                continue
+            if sside < self.min_size + 2:continue
             box = np.array(box)
-            box[:, 0] = np.clip(
-                np.round(box[:, 0] / width * dest_width), 0, dest_width)
-            box[:, 1] = np.clip(
-                np.round(box[:, 1] / height * dest_height), 0, dest_height)
+            box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
+            box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
             boxes.append(box.astype(np.int16))
             scores.append(score)
         return np.array(boxes, dtype=np.int16), scores
@@ -158,22 +153,20 @@ def box_score_slow(self, bitmap, contour):
 
     def __call__(self, outs_dict, shape_list):
         pred = outs_dict['maps']
-        if isinstance(pred, torch.Tensor):
-            pred = pred.cpu().numpy()
         pred = pred[:, 0, :, :]
         segmentation = pred > self.thresh
+        if isinstance(segmentation, torch.Tensor):
+            segmentation = segmentation.cpu().numpy()
+
         boxes_batch = []
         for batch_index in range(pred.shape[0]):
             src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
             if self.dilation_kernel is not None:
-                mask = cv2.dilate(
-                    np.array(segmentation[batch_index]).astype(np.uint8),
-                    self.dilation_kernel)
+                mask = cv2.dilate(np.array(segmentation[batch_index]).astype(np.uint8),self.dilation_kernel)
             else:
                 mask = segmentation[batch_index]
-            boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,
-                                                   src_w, src_h)
+            boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,src_w, src_h)
             boxes_batch.append({'points': boxes})
         return boxes_batch
\ No newline at end of file
diff --git a/rough_layout.py b/rough_layout.py
index 7720395..939cb48 100644
--- a/rough_layout.py
+++ b/rough_layout.py
@@ -66,7 +66,7 @@ class PDFImageDataset(IterableDataset):
     client = None #client = build_client()
     def __init__(self, metadata_filepath, aug, input_format,
-                 mfd_pre_transform,
+                 mfd_pre_transform, det_pre_transform=None,
                  return_original_image=False):
         super().__init__()
         self.metadata= self.smart_read_json(metadata_filepath)
@@ -77,6 +77,7 @@ def __init__(self, metadata_filepath, aug, input_format,
         self.input_format = input_format
         self.mfd_pre_transform = mfd_pre_transform
         self.return_original_image = return_original_image
+        self.det_pre_transform = det_pre_transform
     def smart_read_json(self, json_path):
         if "s3" in json_path and self.client is None: self.client = build_client()
         if json_path.startswith("s3"): json_path = "opendata:"+ json_path
@@ -146,10 +147,13 @@ def read_data_based_on_current_state(self):
         # if layout_image.size(1) < 1042:
         #     layout_image = torch.nn.functional.pad(layout_image, (0, 0, 0, 1042-layout_image.size(1)))
         mfd_image=self.prepare_for_mfd_model(oimage)
-
-        #print(self.current_pdf_index, self.current_page_index)
-        return self.current_pdf_index, self.current_page_index, mfd_image, layout_image, height, width, oimage
-
+        det_images = torch.from_numpy(self.det_pre_transform(original_image)[0])
+
+        output= {"pdf_index":self.current_pdf_index, "page_index":self.current_page_index, "mfd_image":mfd_image, "layout_image":layout_image, "det_images":det_images, "height":height, "width":width}
+        if self.return_original_image:
+            output['oimage'] = original_image
+        return output
+
     def check_should_skip(self):
         if self.current_pdf_index >= len(self.metadata):
             raise StopIteration
@@ -196,32 +200,25 @@ def prepare_for_mfd_model(self, im:np.ndarray):
         im = torch.from_numpy(im)
         return im[0]
 
-def custom_collate_fn(batch):
-    current_pdf_indexes = []
-    current_page_indexes = []
-    mfd_images = []
-    layout_images = []
-    heights = []
-    widths = []
-    oimages = []
-
-
-    for current_pdf_index, current_page_index, mfd_image, layout_image, height, width, oimage in batch:
-        current_pdf_indexes.append(current_pdf_index)
-        current_page_indexes.append(current_page_index)
-        mfd_images.append(mfd_image)
-        layout_images.append(layout_image)
-        heights.append(height)
-        widths.append(width)
-        oimages.append(oimage)
+def custom_collate_fn(batches):
+
+    return_batch = {}
+    for batch in batches:
+        for key,val in batch.items():
+            if key not in return_batch:
+                return_batch[key] = []
+            return_batch[key].append(val)
+
+    keys = list(return_batch.keys())
+    for key in keys:
+        if key in ["pdf_index", "page_index","height", "width"]:
+            return_batch[key] = torch.tensor(return_batch[key])
+        elif key in ["mfd_image", "layout_image", "det_images"]:
+            return_batch[key] = torch.stack(return_batch[key])
+        elif key in ['oimage']:
+            return_batch[key] = return_batch[key]
+    return return_batch
 
-    mfd_images = torch.stack(mfd_images)
-    layout_images = torch.stack(layout_images)
-    heights = torch.tensor(heights)
-    widths = torch.tensor(widths)
-    current_pdf_indexes = torch.tensor(current_pdf_indexes)
-    current_page_indexes = torch.tensor(current_page_indexes)
-    return current_pdf_indexes, current_page_indexes, mfd_images, layout_images, heights, widths, oimages
 
 def clean_layout_dets(layout_dets):
     rows = []
@@ -238,12 +235,23 @@ def 
clean_layout_dets(layout_dets): from modules.self_modify import ModifiedPaddleOCR,update_det_boxes,sorted_boxes from utils import * - -def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmodel=None, inner_batch_size=4, batch_size=32,num_workers=8,timer=Timers(False)): +def process_batch(inputs): + preds, shape_list,ori_shape = inputs + post_result = ocrmodel.batch_det_model.fast_postprocess(preds, shape_list) + dt_boxes = post_result[0]['points'] + dt_boxes = ocrmodel.batch_det_model.filter_tag_det_res(dt_boxes, ori_shape) + return dt_boxes +def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, + ocrmodel=None, inner_batch_size=4, + batch_size=32,num_workers=8, + do_text_det=False, + do_text_rec=False, + timer=Timers(False)): dataset = PDFImageDataset(pdf_path,layout_model.predictor.aug,layout_model.predictor.input_format, mfd_pre_transform=mfd_process(mfd_model.predictor.args.imgsz,mfd_model.predictor.model.stride,mfd_model.predictor.model.pt), - det_pre_transform=ocrmodel.batch + det_pre_transform=ocrmodel.batch_det_model.prepare_image, + return_original_image=do_text_rec ) dataloader = DataLoader(dataset, batch_size=batch_size,collate_fn=custom_collate_fn, num_workers=num_workers) @@ -255,19 +263,23 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmod batch = featcher.next() while batch is not None: # try: - pdf_index_batch,page_ids_batch, mfd_images_batch,images_batch,heights_batch, widths_batch,oimage_list = batch + pdf_index_batch, page_ids_batch = batch["pdf_index"], batch["page_index"] + mfd_layout_images_batch, layout_images_batch, det_layout_images_batch = batch["mfd_image"], batch["layout_image"], batch["det_images"] + heights_batch, widths_batch = batch["height"], batch["width"] + oimage_list = batch.get('oimage',None) pdf_index = set([t.item() for t in pdf_index_batch]) new_pdf_processed = pdf_index - pdf_passed pdf_passed = pdf_passed|pdf_index - for j in tqdm(range(0, len(mfd_images_batch), inner_batch_size),position=3,leave=False,desc="mini-Batch"): + for j in tqdm(range(0, len(mfd_layout_images_batch), inner_batch_size),position=3,leave=False,desc="mini-Batch"): pdf_index = pdf_index_batch[j:j+inner_batch_size] page_ids = page_ids_batch[j:j+inner_batch_size] - mfd_images = mfd_images_batch[j:j+inner_batch_size] - images = images_batch[j:j+inner_batch_size] + mfd_images = mfd_layout_images_batch[j:j+inner_batch_size] + images = layout_images_batch[j:j+inner_batch_size] heights = heights_batch[j:j+inner_batch_size] widths = widths_batch[j:j+inner_batch_size] - oimages = oimage_list[j:j+inner_batch_size] + oimages = oimage_list[j:j+inner_batch_size] if oimage_list is not None else None + detimages = det_layout_images_batch[j:j+inner_batch_size] with timer('get_layout'): layout_res = layout_model((images,heights, widths), ignore_catids=[]) with timer('get_mfd'): @@ -303,31 +315,67 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmod ori_shape_list.append((real_input_height, real_input_width)) pdf_and_page_id_this_batch.append((pdf_id, page_id)) rough_layout_this_batch.append(layout_dets) + assert real_input_height == 1920 + assert real_input_width == 1472 if ocrmodel is not None: + if not do_text_det:continue with timer('text_detection/collect_for_line_detect'): - canvas_tensor_this_batch, partition_per_batch,canvas_idxes_this_batch,single_page_mfdetrec_res_this_batch = collect_paragraph_image_and_its_coordinate(oimages, rough_layout_this_batch) - with 
timer('text_detection/get_line_detect_result'):
-                dt_boxes_list = ocrmodel.batch_det_model.batch_process(canvas_tensor_this_batch,ori_shape_list=ori_shape_list)
-            with timer('text_detection/collect_for_text_images'):
-                text_image_batch, text_image_position,text_line_bbox = collect_text_image_and_its_coordinate(single_page_mfdetrec_res_this_batch, partition_per_batch, canvas_tensor_this_batch,dt_boxes_list)
-            with timer('text_detection/get_line_text'):
-                rec_res, elapse = ocrmodel.text_recognizer(text_image_batch)
-            for line_box, rec_result,(partition_id,text_block_id, text_line_id) in zip(text_line_bbox, rec_res,text_image_position):
-                text, score = rec_result
-                pdf_id, page_id = pdf_and_page_id_this_batch[partition_id]
-                pdf_path = dataset.metadata[pdf_id]['path']
-                p1, p2, p3, p4 = line_box.tolist()
-                data_to_save[pdf_path][page_id].append(
-                    {
-                        'category_id': 15,
-                        'poly': p1 + p2 + p3 + p4,
-                        'score': round(score, 2),
-                        'text': text,
-                    }
-                )
-
+                canvas_tensor_this_batch, partition_per_batch,canvas_idxes_this_batch,single_page_mfdetrec_res_this_batch = collect_paragraph_image_and_its_coordinate(detimages, rough_layout_this_batch,2)
+            shape_list_batch = np.array([[(1920,1472, 0.5, 0.5)]]*len(canvas_tensor_this_batch))
+            ori_shape_list = [(1920,1472)]*len(canvas_tensor_this_batch)
+            with torch.no_grad():
+                with timer('text_detection/stack'):
+                    canvas_tensor_this_batch = torch.stack(canvas_tensor_this_batch)
+                with timer('text_detection/det_net'):
+                    dt_boxes_batch = ocrmodel.batch_det_model.net(canvas_tensor_this_batch)
+                # with timer('text_detection/discard_batch'):
+                #     pred_batch = ocrmodel.batch_det_model.discard_batch(dt_boxes_batch)
+                with timer('text_detection/batch_postprocess'):
+                    #dt_boxes_list=ocrmodel.batch_det_model.batch_postprocess(pred_batch, shape_list_batch,ori_shape_list)
+                    preds_list = dt_boxes_batch
+                    with timer('text_detection/batch_postprocess/postprocess_op'):
+                        post_result = ocrmodel.batch_det_model.fast_postprocess(preds_list, shape_list_batch[:,0])
+                    # dt_boxes = post_result[0]['points']
+                    with timer('text_detection/batch_postprocess/filter'):
+                        dt_boxes_list = [ocrmodel.batch_det_model.filter_tag_det_res(dt_boxes['points'][0], ori_shape)
+                                         for dt_boxes, ori_shape in zip(post_result, ori_shape_list)]
+
+            if oimages is not None and do_text_rec:
+                with timer('text_detection/collect_for_text_images'):
+                    text_image_batch, text_image_position,text_line_bbox = collect_text_image_and_its_coordinate(single_page_mfdetrec_res_this_batch, partition_per_batch, oimages,dt_boxes_list)
+                with timer('text_detection/get_line_text_rec'):
+                    rec_res, elapse = ocrmodel.text_recognizer(text_image_batch)
+                for line_box, rec_result,(partition_id,text_block_id, text_line_id) in zip(text_line_bbox, rec_res,text_image_position):
+                    text, score = rec_result
+                    pdf_id, page_id = pdf_and_page_id_this_batch[partition_id]
+                    pdf_path = dataset.metadata[pdf_id]['path']
+                    p1, p2, p3, p4 = line_box.tolist()
+                    data_to_save[pdf_path][page_id].append(
+                        {
+                            'category_id': 15,
+                            'poly': p1 + p2 + p3 + p4,
+                            'score': round(score, 2),
+                            'text': text,
+                        }
+                    )
+            else:
+                for partition_id in range(len(partition_per_batch)-1):
+                    pdf_id, page_id = pdf_and_page_id_this_batch[partition_id]
+                    pdf_path = dataset.metadata[pdf_id]['path']
+                    partition_start = partition_per_batch[partition_id]
+                    partition_end = partition_per_batch[partition_id+1]
+                    dt_boxes_this_partition = dt_boxes_list[partition_start:partition_end]
+                    for dt_boxes in dt_boxes_this_partition: # (1, 4, 2)
+                        for line_box in dt_boxes:
+                            p1, p2, p3, p4 = line_box.tolist()
+                            data_to_save[pdf_path][page_id].append(
+                                {
+                                    'category_id': 15,
+                                    'poly': p1 + p2 + p3 + p4,
+                                }
+                            )
         pbar.update(len(new_pdf_processed))
         # except:
         #     print("ERROR: Fail to process batch")
@@ -375,7 +423,11 @@ def deal_with_one_dataset(pdf_path, result_path, layout_model, mfd_model, ocrmod
     timer = Timers(True)
     deal_with_one_dataset("debug.jsonl", "debug.stage_1.jsonl",
-                          layout_model, mfd_model, ocrmodel=ocrmodel, inner_batch_size=3, batch_size=12,num_workers=4,timer=timer)
+                          layout_model, mfd_model, ocrmodel=ocrmodel,
+                          inner_batch_size=2, batch_size=4,num_workers=4,
+                          do_text_det = True,
+                          do_text_rec = True,
+                          timer=timer)
 
     # dataset  = PDFImageDataset("part-66210c190659-000035.jsonl",layout_model.predictor.aug,layout_model.predictor.input_format,mfd_pre_transform=None)
     # dataloader = DataLoader(dataset, batch_size=8,collate_fn=custom_collate_fn)
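To make the new dict-based batching concrete, here is a small standalone sketch (with made-up image sizes) of the flow introduced above: PDFImageDataset now yields one dict per page, and custom_collate_fn turns a list of such dicts into tensors so deal_with_one_dataset can unpack the batch by key. The import assumes rough_layout.py is on the path; 1920x1472 matches the asserts in the patch.

import torch
from rough_layout import custom_collate_fn  # assumes repo root on sys.path

def fake_page(i):
    # Per-page dict shaped like PDFImageDataset's output; sizes illustrative.
    return {"pdf_index": 0, "page_index": i,
            "mfd_image": torch.zeros(3, 1888, 1472),
            "layout_image": torch.zeros(3, 1042, 800),
            "det_images": torch.zeros(3, 960, 736),
            "height": 1920, "width": 1472}

batch = custom_collate_fn([fake_page(0), fake_page(1)])
assert batch["page_index"].tolist() == [0, 1]          # scalars -> 1-D tensor
assert batch["mfd_image"].shape == (2, 3, 1888, 1472)  # images -> stacked 4-D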
From 1b60a5e9de6fe21f4e41670505a2a52e7ab7d611 Mon Sep 17 00:00:00 2001
From: zhangtianning
Date: Wed, 31 Jul 2024 07:02:44 +0000
Subject: [PATCH 14/74] sync

---
 utils.py | 226 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 226 insertions(+)
 create mode 100644 utils.py

diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000..05bfd43
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,226 @@
+import numpy as np
+import copy
+from modules.self_modify import sorted_boxes, update_det_boxes
+import torch
+import cv2
+def get_rotate_crop_image(img, points, padding=10):
+    """
+    Extracts a rotated and cropped image patch defined by the quadrilateral `points`,
+    with additional padding.
+
+    Args:
+        img (numpy.ndarray): The input image.
+        points (numpy.ndarray): A (4, 2) array containing the coordinates of the quadrilateral.
+        padding (int): The number of pixels to expand the bounding box on each side.
+
+    Returns:
+        numpy.ndarray: The cropped and rotated image patch.
+    """
+    assert len(points) == 4, "shape of points must be 4*2"
+
+    # Calculate the bounding box with padding
+    img_height, img_width = img.shape[0:2]
+    left = max(0, int(np.min(points[:, 0])) - padding)
+    right = min(img_width, int(np.max(points[:, 0])) + padding)
+    top = max(0, int(np.min(points[:, 1])) - padding)
+    bottom = min(img_height, int(np.max(points[:, 1])) + padding)
+
+    # Crop the image with padding
+    img_crop = img[top:bottom, left:right, :].copy()
+
+    # Adjust points to the new cropped region
+    points[:, 0] -= left
+    points[:, 1] -= top
+
+    # Calculate the width and height of the rotated crop
+    img_crop_width = int(
+        max(
+            np.linalg.norm(points[0] - points[1]),
+            np.linalg.norm(points[2] - points[3])
+        )
+    )
+    img_crop_height = int(
+        max(
+            np.linalg.norm(points[0] - points[3]),
+            np.linalg.norm(points[1] - points[2])
+        )
+    )
+
+    # Define the destination points for perspective transformation
+    pts_std = np.float32(
+        [
+            [0, 0],
+            [img_crop_width, 0],
+            [img_crop_width, img_crop_height],
+            [0, img_crop_height],
+        ]
+    )
+
+    # Perform the perspective transformation
+    M = cv2.getPerspectiveTransform(points, pts_std)
+    dst_img = cv2.warpPerspective(
+        img_crop,
+        M,
+        (img_crop_width, img_crop_height),
+        borderMode=cv2.BORDER_REPLICATE,
+        flags=cv2.INTER_CUBIC,
+    )
+
+    # Rotate the image if the height/width ratio is >= 1.5
+    dst_img_height, dst_img_width = dst_img.shape[0:2]
+    if dst_img_height * 1.0 / dst_img_width >= 1.5:
+        dst_img = np.rot90(dst_img)
+
+    return dst_img
+
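+# Illustrative usage (hypothetical page_bgr image and quad, both made up):
+#     quad = np.array([[100, 40], [400, 42], [400, 90], [100, 88]], dtype=np.float32)
+#     line_img = get_rotate_crop_image(page_bgr, quad, padding=10)
+# The crop is perspective-rectified to the quad; near-vertical results
+# (height/width >= 1.5) come back rotated by 90 degrees. Pass a copy of the
+# quad if you still need it afterwards: the function shifts points in place.
+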
+def collect_text_image_and_its_coordinate(single_page_mfdetrec_res_this_batch, partition_per_batch, oimages, dt_boxes_list):
+    text_image_batch = []
+    text_image_position=[]
+    text_line_bbox = []
+    for partition_id, single_page_mfdetrec_res in enumerate(single_page_mfdetrec_res_this_batch):
+        partition_start = partition_per_batch[partition_id]
+        partition_end = partition_per_batch[partition_id+1]
+        #print(partition_start,partition_end)
+        dt_boxes_per_page = dt_boxes_list[partition_start:partition_end]
+        for text_box_id, dt_boxes in enumerate(dt_boxes_per_page):
+            ori_im = oimages[partition_id]
+            height, width, _ = ori_im.shape
+            dt_boxes = sorted_boxes(dt_boxes)
+            dt_boxes = update_det_boxes(dt_boxes, single_page_mfdetrec_res)
+            for bno in range(len(dt_boxes)):
+                tmp_box = copy.deepcopy(dt_boxes[bno])
+                text_line_bbox.append(tmp_box)
+                img_crop = get_rotate_crop_image(ori_im, tmp_box, padding=10)
+                text_image_batch.append(img_crop)
+                text_image_position.append((partition_id,text_box_id,bno))
+
+    return text_image_batch, text_image_position,text_line_bbox
+
+
+def collect_mfdetrec_res_per_page(single_page_res):
+    single_page_mfdetrec_res = []
+    for res in single_page_res:
+        if int(res['category_id']) in [13, 14]:
+            xmin, ymin = int(res['poly'][0]), int(res['poly'][1])
+            xmax, ymax = int(res['poly'][4]), int(res['poly'][5])
+            single_page_mfdetrec_res.append({"bbox": [xmin, ymin, xmax, ymax]})
+    return single_page_mfdetrec_res
+
+def collect_image_tensor_cropped(oimage:np.ndarray, single_page_res, scale=1):
+    image_np = oimage
+    canvas_list = []
+    canvas_idxes= []
+    for bbox_id, res in enumerate(single_page_res):
+        if int(res['category_id']) in [0, 1, 2, 4, 6, 7]: # categories that need OCR
+            xmin, ymin = int(res['poly'][0]/scale), int(res['poly'][1]/scale)
+            xmax, ymax = int(res['poly'][4]/scale), int(res['poly'][5]/scale)
+            if isinstance(image_np, np.ndarray):
+                canvas = np.ones_like(image_np) * 255
+            else:
+                canvas = torch.ones_like(image_np) * image_np[0,0,0]
+            if canvas.shape[0]==3:
+                canvas[:,ymin:ymax, xmin:xmax] = image_np[:,ymin:ymax, xmin:xmax]
+            elif canvas.shape[2]==3:
+                canvas[ymin:ymax, xmin:xmax,:] = image_np[ymin:ymax, xmin:xmax,:]
+            else:
+                raise ValueError("expected a CHW or HWC image with 3 channels")
+            canvas_list.append(canvas)
+            canvas_idxes.append(bbox_id)
+    return canvas_list, canvas_idxes
+
+def collect_paragraph_image_and_its_coordinate(oimages, rough_layout_this_batch,scale=1):
+    canvas_tensor_this_batch = []
+    canvas_idxes_this_batch = []
+    single_page_mfdetrec_res_this_batch = []
+    partition_per_batch = [0]
+    for oimage, single_page_res in zip(oimages, rough_layout_this_batch):
+        single_page_mfdetrec_res = collect_mfdetrec_res_per_page(single_page_res)
+        canvas_tensor, canvas_idxes = collect_image_tensor_cropped(oimage, single_page_res,scale=scale)
+        canvas_tensor_this_batch.extend(canvas_tensor)
+        canvas_idxes_this_batch.append(canvas_idxes)
+        single_page_mfdetrec_res_this_batch.append(single_page_mfdetrec_res)
+        partition_per_batch.append(len(canvas_tensor_this_batch))
+    return canvas_tensor_this_batch, partition_per_batch,canvas_idxes_this_batch,single_page_mfdetrec_res_this_batch
+
+def collect_paragraph_image_and_its_coordinate_from_detection_batch(detection_images, rough_layout_this_batch):
+    canvas_tensor_this_batch = []
+    canvas_idxes_this_batch = []
+    single_page_mfdetrec_res_this_batch = []
+    partition_per_batch = [0]
+    for oimage, single_page_res in zip(detection_images, rough_layout_this_batch):
+        single_page_mfdetrec_res = collect_mfdetrec_res_per_page(single_page_res)
+        canvas_tensor, canvas_idxes = collect_image_tensor_cropped(oimage, single_page_res)
+        canvas_tensor_this_batch.extend(canvas_tensor)
+        canvas_idxes_this_batch.append(canvas_idxes)
+        single_page_mfdetrec_res_this_batch.append(single_page_mfdetrec_res)
+        partition_per_batch.append(len(canvas_tensor_this_batch))
+    return canvas_tensor_this_batch, partition_per_batch,canvas_idxes_this_batch,single_page_mfdetrec_res_this_batch
+
+import time, math
+class _DummyTimer:
+    """A dummy timer that does nothing."""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        pass
+
+class _Timer:
+    """Timer."""
+
+    def __init__(self, name):
+        self.name = name
+        self.count = 0
+        self.mean = 0.0
+        self.sum_squares = 0.0
+        self.start_time = None
+
+    def __enter__(self):
+        self.start_time = time.time()
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        elapsed_time = time.time() - self.start_time
+        self.update(elapsed_time)
+        self.start_time = None
+
+    def update(self, elapsed_time):
+        # Welford's online update of the running mean and variance
+        self.count += 1
+        delta = elapsed_time - self.mean
+        self.mean += delta / self.count
+        delta2 = elapsed_time - self.mean
+        self.sum_squares += delta * delta2
+
+    def mean_elapsed(self):
+        return self.mean
+
+    def std_elapsed(self):
+        if self.count > 1:
+            variance = self.sum_squares / (self.count - 1)
+            return math.sqrt(variance)
+        else:
+            return 0.0
+
+class Timers:
+    """Group of timers."""
+
+    def __init__(self, activate=False):
+        self.timers = {}
+        self.activate = activate
+    def __call__(self, name):
+        if not self.activate:return _DummyTimer()
+        if name not in self.timers:
+            self.timers[name] = _Timer(name)
+        return self.timers[name]
+
+    def log(self, names=None, normalizer=1.0):
+        """Log a group of timers."""
+        assert normalizer > 0.0
+        if names is None:
+            names = self.timers.keys()
+        print("Timer Results:")
+        for name in names:
+            mean_elapsed = self.timers[name].mean_elapsed() * 1000.0 / normalizer
+            std_elapsed = self.timers[name].std_elapsed() * 1000.0 / normalizer
+            space_num = " "*name.count('/')
+            print(f"{space_num}{name}: {mean_elapsed:.2f}±{std_elapsed:.2f} ms")
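A quick usage sketch of the Timers helper above (standalone; the import assumes the utils.py from this patch is on the path): timers are created lazily by name, the 'a/b' naming convention nests them, and log() prints mean±std per name, indented by '/' depth.

import time
from utils import Timers

timer = Timers(True)   # Timers(False) hands out no-op dummy timers instead
for _ in range(5):
    with timer('text_detection'):
        with timer('text_detection/det_net'):
            time.sleep(0.002)
timer.log()  # e.g. "text_detection: 2.10±0.05 ms", nested name indented below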
From a1cef0af702e5eeaeb9dd09f1ec7c54b6a7bf2d3 Mon Sep 17 00:00:00 2001
From: zhangtianning
Date: Wed, 31 Jul 2024 07:41:09 +0000
Subject: [PATCH 15/74] add weight

---
 weights/en_ptocr_v3_det_infer.pth | Bin 0 -> 2540826 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 weights/en_ptocr_v3_det_infer.pth

diff --git a/weights/en_ptocr_v3_det_infer.pth b/weights/en_ptocr_v3_det_infer.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4840e334b5417837de6e492cc067ab14eb879275
GIT binary patch
literal 2540826
[base85-encoded binary payload of the 2.5 MB detector weights omitted]
zp&Vy$YY+WwHh@Oetrj-+TLqtH+0jqBBC61{pT3uzB>MYa;CrJMTvk$y)31$zQ=>-{ zH$#^1R$D_H`o{q8_y?z45pk#H%Ft%(Tjmft1mTLZch%)(ZFJ1 zIg!lgah{h%2%DKNtPf>;ql11>og9v#r{uT}2gXp6Xw&Uq=fK7Eufn+z_xK^bCE`q_ zR`?(i)Ard~bmpy#;`y7`l1sy~@#CH))P(8%ffpZ1SH3RG$Ka~9OH>NThH_)ZSJF&iC4;k_=Sz4Ry2U&~@)^YnB_FYMUW4~VDS($3w`7#s~ z!y93uzO2Oej~3}n@uf%H66xuU-o#r8@z}gA%wJyyimy2?&uAhdfEDrLlq$xTTVDo>vt=J z4J;$m(5Fk6#UBErjh7)RPsE85C(&^GQh4X;i}q`3N!^*IO9-p8SH#QI53o+X>XEQ>6`NOlMJeCr#}gOLb%-;lycGdV11E z+P5eJoECGE6B%JdcaEmebvF>b{!AC&H8CdV-}bA^%3MeHe{q3D-^K~na#Mt_H|02! zUvBipmE+>@-F2{ga~}?g7)!+kj^x_We4_F+9)87qN8f-~=n=3Ve_q+cyBo`Lg)?MG z&8={9I4yyCZ@bL02(_>@pY>>Fu)Rx_{T-8cOtuuWIUJ-gA%6E zD&yMMG<>-?)+4#CaR(%q5!4+kMvazJF!AaSen$FfLCxEW zjB-$7zNQj+KZ^B!oMw_4syFzd_Zk1H;2?&4{sYTJvXbqL7p57pkm5x>s=0^lBbzSA zTW?p8z1#kOj5MMNt3LB*Rz;F){e9r%bTv|WaUSMQoku>yc$o00t4=>!g_t^;!Oq%# zr1WGsIrYN_bjDdwiwmovyE+rx?#!ho#_Mp8T?!DRalWy|bfd|-KOLKHO zO!GK_{c}$Ul^y2z`N>GsaBjm*Yhy{Q|260`_oK^R91@nBcf#e*7tud0nJ$$*ODpWz zoz^bWX+g;(*k0L+FB_!9@zwznz2`e_)%FzzynYF`$MYcl+zGIZ_GLTf7Brx%47WsG z;zQUTo96@(S6|U0wK*lrdCgsmsYgdJcFb~;!E~$V7LGKd^BcU-{(^ZPwGf+802lld zp{1-5E|i}XUn>-G+H8~hxKBPt9({ur+<92G%!fq2nM>2VKH!cClh9ah5WKy%ffS-S z{LLr@Nq;*!*zg_t=YA7myu~QF}8%_(8>ecA%Yi!$j=TzFyBM6TT$rjoc+Rz;n z&FQAFZv4q|N{@BEqTANrAd0*UYQ;mqmM)+0l|<*r6ZC|ZIhS45I8tQUM!{Uzp4JWaQHD+_m<77~*c5rWXV4cC^- zNs6)`!qZnLVAuIl3}#))Y%^8*aiRU$Z$+b^gK<>tx8@U_Y6A#AlnVEQCX(8O8_*uG z0T-4VQs?Jy`S8LGFe3g3KS*^Uo_J$G6t@S$v|BZ>(`^Sh-z`9cSu-((X->{+2O;_u zV}NYVLD?}9=|4#`?(qyGL7La`xtB6k4Bbtqx3Qhhsmr8~Q@!c*&?~rODJM~Q8xC=D zdQhae5|bB4Vo1XrxKS2JeQeVCKOcCSmhli9+QZ43*=EG*rzNdiFcpqnc?+Xi-fb

A6 zOmTsg9hHv}an*xO$oIQ#4Cs3X_L^y7#`bvT8_ROadd>Lob|DQ|V^4e5Bx3Uk%Ez3{ z!mlst>GIMHy5PDib}qL?vl;hL{qG6v){MdV7tiCptrRT_gJ{CzshC>HwyA0pnO1%S zo>zOpfz?*zaT4n+zb%FJ%=88hdY5q$>@Xt)taI@12)N)$<)R*fT$8~*WUABA9g znIZa__oF4!@$k;241$#Ipv>jraHj1hjOl1Z^_gbE)VX)iAVfks>g34w*&@lnjV*lg zb#3xUQADb2JNRo&%lRAj4w#xQCmAsE8rTgs!V3y_&`r1~9Mw01R`&lgaq=xZ3hT&~ zKcAS+bQ4}OZe4)C7U`=x4JT`qNTkgtsATL|yFrn7=GH~Xop=~OR^LabFSBUSLqB-4 zs+9dX+xb1-B`Bx_L+?_ip_h%~lbP1Nt%Bp`2%E)qL$YxOa-9AaYwGec9@iWfaXW$< z@bFDpB47F$*IHDj6!{gSViM-W%bvD8}1Soep7RME+)SbRMH~BlJY{r4kA+rW(kjK_t7ECe}WTxcdlIb z4ML;e@V@&7k^4zycyLDq=#AeBhqfo-$g2~@?`xS4JnJkZGcUGLek4`>t3`Zv$%=jB zhf&6V=9Zpk+q`E)WbDcedgXW-#1C!+#|d*`jmt%pE*42PDwon*uiR$4_u%;T}wBgGfz z;=@~oIL+)iUN+5u&t)RX{Q4R6c-d-DEOMlJ?M}R>H}W4ppFrjR&d|aQ34FS$8o8R5 zC*-MkkoDWkFllllM1+Vi(I>*Wclt6|VLXuO3PXswt~>qPv;z!!M}Vul43*9sE{tPY z@}7Yj_^`&3#)h&@>yI7SrKstU|#6mi2MLii01Z!y|}<1UPD#5<#mDRrNL z+j^rwd-((y_OTE0%ixokVK;4hmM*Wf&u}!omOUp`DHb zbpE+YT|1V;6&W!+k>~2YrOj|-UKUX<(js2(ePEdV1e$98A5F^6M_rb?z8INEDp>FK ztJf%SbPEvgo6k0)4P;msJRijsBFWLma%9T7I=CELF37UnW8(-<-2P64{i=)V&TGA5 zo<Rpl2|cH+sbYnU zy=FqhPBTHi_Xx&%d5~VsLL$%3^XEPD;O*}R{J;`f*sA_i> z(quFE*-i>z9a{vaKkOl22lT@FEB@r@2rqPUaY1RTJsn{mO`0lLejw^UF|_1L?>ZcY z(@z*z(7_wbZvvE@Do4vfyRfnwv_rW2X*F?CPEe@I*2m*Vg#=gB+AYA{gwg}p3iB-#D?>;OMSVPaYk%{e=m9G>D$ zbPmYELiKXA{+mcb=S_lj7x$6UsEtJbYXa%#yR5FNZ2}p0?Ff#4?o8X*XZ3oym@0O0 zlGyL%_^kK4I5TMu>%g4>?OUmoE;_=OytW}qMOH-nM+&cgP9og&v>_emz6-MH64;u= zST)UVbkYN3l5KZF&=Xq{XlsK77d2eGmzUGycK0Fqlrd^N*P*kD^@!5@Zr*xIJPkB! z1kpi`D?4&a2%oo=?jJpi7_XfH-Tk&h} zBs7z{5zqYN(0Oo|AX$3}^=BL)BZ98-KL^Wj)3XH}R@8)Bw>)D!8>U-kdt&~*bJXL- z7`CIegNCCgwRv$B>Siy)?J+5Iac4QvRNaMvYjP?7I2vp2MnLfu2kOjtVhTp`5}krf zl7^PO{p4B}8ZrCo67evP~n7(8p!M^2QXV)yPWTbk_-Hcg9NJ1hAi#gFaMP$dIWC z!Bpgzf%|5T!keWp`Tr)(qHo#nLZIG2da}YCHvJ7E>Um=^)wY0tY+(f9!}de}8w*Io zvT6AD{#5#d<-u>Z+miHw9CzKf9`jvIgl4@0adTiSPLp|rg)iCj)5f2Fcdo;^lXVkJ z-l)=-IbwQ!87G-`I|)QbPok4t1`VY^_}Wo4Rp zE+59OA4J>y^}%2s+qBD`$EzLJr$-N~L6C74iD#9XLv|v`6vhs;y>kHM-Y`%Y+dSUPbpmEmfb zrf4+ie%(%Xc0R`LG3!kxf^W-4s_!q;kMz&6YjAZLSAi<<2*wN zN$|HO(K8qy)ufG|(j?-}C~cu<8{{xQz6UQ|^#bXI_0%Y0AsN>{gKjOBkp#_1moBWW z;u{9NL5Ii-xYXkjiTzITi{pJT&CjGqW{Y`S+q>fQCQZ8DX#@>yV9bpcHG1;&bMRoy zh-oZKVQ~2)qU>ZyA95Mv`r3ulA-5pt0qaY)@=*Ep8?S1wtGeqjS~Z%QYsh1$G;YE)At=8_eK}W{F`CIXCwOFFcyWhL8gufytNF`&tK z!(S#dUHn}uq<&T=V_ieV(XsUy)w-JBJ5hyh4QLTkn}5Qokiqakb|N{QA4+4a^|9@! zBK0n57LHif;>9f*^_OHzc+Gg`A@}`))pMkvxFuVtY)AyxBR7T8wFNNRF|E%2+&Z%8 ztpMGX6b9%<;?cAM2(>v#$J9S|u2+?jELh1&F3c&B{@I~kNt&>80lUK$ z+L9;R1IP)jewe%=kj4$uAbO`y;dFyo(yy}*FYJ-w>c73l0+lO-mzSqpI!Db{M#FWF z3_4|PDqrfL1V^bGnbgd)jQ?2Zm9fOj6^x_Z7)0FG`IEn4a-4m5GP-TQfty2_PFBu( zw!ZmfeL|%0Ca;uV#ivP2lV8I+-yu@9^iF(PG=MDH#W*_gJBja0d8$@e1C0BI&u*{4 zF1`k=$Z21I6VMBmXN?|mqAa#%QaMG=DFzoRX=CkEc%y3A~Kt#iD#zezb>d zic+fQWVNAlQw{tzizWwC6~L8!KkyY&HXEh&7W+XzxlYJr=>;re6Gcf zGk$`i>j<(UBa3fhKhxtUq}VDXppWew4F0!=-?@D=43G4nd#S(RyI6-z{(BfUig;49 z*cvL=FT=>PYO)}QlN_jR5c-@=kej7UW%r&CPqs3pn_}-kj`vM;y<$thZlA)xCaZ{_ z#RqCwpg_8QH)GB&p} zxlYFp&C#Q=KgfY;4quz;kXN^y}$*hX`P1H$8EwK)=f1yr$h(d*^kS99K-TP8zTJ9!X+E_fT8s~ zoO9eKaZD19qEZ zpg4OO3A>Ywi-U78?A#F)7)PmPb`~6qxdllj)dWYw+#J5K?xE;}U{>NP_D} zAyqEo zMH24GBQ%#}2{=Q&>gTgZLfRZcYFXUo3>nPi~ zHQ|lT`8ca@B7My^tu@_5lFqCfIEravR`0}A^Svw88_AlMnq9&NnJqB+auf0yw~@a2 zha)YnQ{O2jbb49{aXi>0>`RZ5?>rP{*^&kC8u%A}SHy|KV_=PnKG=-#CW}p03W*v~ zc$CUYzB(Ti7QGFldWIrNaDR@QJ3K~kAOC@6n@+)PzdQWBZ{z8TLn3Zp!T}+<1>w-h z!6g3PTu`#xEuE=9joyDdflk5gux;`BJo zj#>uS9vy%=NA0Ot`W!#RWbl&J<#>kncdo8W;a`hWrT-P$5rxLRL{^b)Usk-vs>nma zfU z_)(c!@=Ixr?N-p%mcwNodbDjx8o%tOB^@zMg_yf6LU$L|Q>mChBsU7kk;~_VKmQ8o z%w_@B&76sA4w!(p+$FTso(%&;?9LdFPA@O51!v2V*!FucwP0PIV76hjXkfh%)ygu! 
zf0Qx4dMFH4HlouRuah_HfmrLctn39X<-rchMv1%JY0oZHI1cn?+tNop4tiRSI&XQW*0mzE0Uzx znn2l95u8?1V>yL541F&Tg5^)%!^E06v3y&}{dV;Ht--Hb;Rl7MpQDHl5qh5JPzBF; z@h9?-?>v7A2Cgr)CgQoF;dCm~ zEAGv$7ve&rY2$E_#Km+apPlCevl4Fb@ovY!tjHNt0$p)io+8aHJc;>?Gxu%VB66hW zCm1`(aC2+2_${AHpyj9*zI!(cb5HLRo(eIfdze0n>JZc8d*m@@sRC$r4Tl*Gqls#C z4f<`{C-C2AqxY^PKJ@xxFlL^Y#WQR2xay>k%zV=&(?pVr_!+`)|Gn(|eNdR&u?^-I zdO+6>9WpkE-J#ZvBYn@ZVDC;D68dA8w9fgEP<44I4b0gKMeLcnn3$4^=ci!TQ9Y78 z#-4J2%us6f8;12CDbK5G_k+) zH6e4C57Ad#P9B^dg>jWegslwF{HUMsSiT> z>hj+WgbxOhFnoOs?tNiIREO-Lb&SwbyM|P_{KB8l z7705Cxlm=sR`hg4woNLLHGbI>fc@uwY zPM|&Szd$R?`1B7{;$6GM&|!5R9+T0;s&6j3JXRsW+EpL~dEq@JBU&t*#lM&;CuvXJ zix&ES#mgplL)ueqy6tU0LeE~oHDcD=zU)IcZHj^aW)u;lKn=35=ZJ8D=@p%&=|nQ< zyO4G6s*qY>Pb}xmqc~WWlP;VnRhfBSh>(+!D32&5cUEqPcZ{+0XPqux=((CWr%vHz zrFHOhv?8^7nLrLaXo3c%Ok$DeO{bk&MUrc7@_~B8sm{Dn(|d}jL+GGTKWq5Ev9$9zR7n|zBFH1)?b+s}w2 zl1C8tm|wzp>qMwk5J?8SJ%^Y5#*(5mZ*qg>Z`;}K!sL%8)cS`=qGS<{=HwXvP$e6> zuZ*DMQrO*4vqVtbHj(7|Eu^k{9O%8U46v#ODtAMf`51esrOHAgnr09G?YoNoDh5g4 zTMdCVPS?rW$t&p4ZMCTT$Am_#Oy;##%PAH)hh^5(ps178o$z<{3z7W>EHmmC?68~GZvEw947YqH#X2>k^>3Pn}hXJ29tY!D}|b|kNBkr z2jhxkESt1@4E&XU$N&83MKlDKjo9ysgPhbMRb7?L+UySUYWH!PXF1;>HKPjOYM^W1 zYE&s?`xS?@Nq*HnYMs_ZJI8a9&)ZGO=4LB$e}o)&^Na`HL`Wd_El{jiEVeZ=^Co8zIe#b$hL?$@{8mGNND=371_62UbMD{-f8( za#Inv`xWb=8SKHT)iT_$f(97JDbOk%3%W*+liaeHhV|3*ox@WdiI0bti`m%OQu}>d zs5EmCE!OG7^>rM#RZES|Ro_NUpN|y3kkiMtM2Bb$wk6$nmO$K615|tShPQ=spy}s; zZVMy(9aM<{mx1ERAK?ct$EEK~Coe}8V#|3>lDJP!k}zQj1MmNafbsss`+gA458ftC zc&Sf>s%bRx%{jqCi=AU$y+Hqzdv%d-wvZjZ9IbkmN1C5#(f?wHlESl7NR9m_D(zm% zPq=ss+QuiLSBVRj=~W4l#-2qYPUXF-$R;FV~-l_oNzWUm-Z;@kk9{aLRND={Ob6I(LVEO z#()|0VT}X>FL7Mblm&v@?Kzk*MKUVJUdd! zB}grU+Fgnv*Q0BNFUG;-RM1s?=xN8g#PfJlTh<%yiy-&17?*$eJTz**0JF|i6El{_ zipyMwo-OVW@X(Q@N99BJ>BY2a*%wq{xwglbhm(lcuW@>48h)kONDE`Y=|mzgHdChG zy=6JGweiAR?OtJH$q};L{s07(UE*hFSPO1ZAK=UFJo1ZqBSS}BhCA^+{3cyOZLXa` zH~(a)ojil?ntUJTy)9;YHUFsHmm|2hH-z4ZNak<5G{F|u;V z0UCQ5J31D96gZ4*JPzH8ln-@xC)LO8$ouh@NMC(|>hmk{aprBjJWh?gd&;=ci`kZg zt`{6+8=Q^{&VY@ZwR6((G)n#7V&Ilhv?RuroZ{xN4X^km+A8}st|O-f~bbeaLU|m;YWx+f3?w`uF_$8ZpL78d?P3M8a9q> zxt%Bs${!8eN6T^!6{@stQ2}|8r3B~wE;(-wXKXJuPSS76QQAFw6U3cAhJRb_;IMlN zwPd?up0iSgrOVF>^{#bZz=Wfs^_6=hEcT15r={dik^ zoctb>L31|_fQa%;7&cZUSzmBaTxNzszhenxocRtUAL23Qz&rdn+J~%sd5}&Gd;<|f z3-O7k1yRnZ!u_ox?u}6@wRuGFSMQ>>K!+bhGph^rxwS0>`ZQnr;5+lX`W zF7RyAUGm0GmKJ!{*TKqlP#w?#@xvOSf145CwlI|KS6UNOwx9g$8q2{Z9|oqmkR@$K zFiC9)-P&PDE%)XVBcF|Q*TmH%E#M4nC{?FUOzU3zO#(m5zk~6u!Q}qEQ&7q>t@6W@ zA>^SYszp43kDKIa>#P5u;)gOR`bx3z-W2+qaa2CnRkPjeNmL>7hhTVylUyxkjDh4q zu$ys@+8rCDS@%9*<`C8gUZGEy20y?C+mWPv(G}RN6A6-S2K1-KCEk~LI-g2Bu~@Ag z{yj?uQH3px5ow~3@PrS2?~R_qQ5^e<^~v^(B>m=Cf}?sgv>b57if9>byyqp1h?q=r zj2y@(w-1o1$`}~6el#?A1n*hf$h+yV`<3K9T*%ghhflwvjH4Vk^l-T_c>Pl1`YRiv zuXuw;-zgf>UInZ79^(0HMkIfJg%GvdF&K3DkZSQQd!9NYu znI`aTfgiE1t-;P02cUlZb!;n)<<(bM&~Cj1p>FCqNM;$J{nb4}!1R7(*gyrA(UD?v zq#kJk;9W3io_(Ea%Po{YW=+3*izOt$UeN53fcb>yIQr*W`lh{*9#(k8 z-~BL+j1KJr`lSt{(?v9Q3wxgA%ScvddPC`MPV#ZcQhNBJ9KAooko5OuUd8HII#&G~ zt}q#hfvbM-e|GMre|EQmv~3NB)YxKiE!zseww3Pt^+k|-5JY9{=h6tR*;3~TANdcF zCk4@qYP@krh8&c8!O!a4NHV9yLgt@DOdV!J#mW+D>&;1m?^|IVnT5HoGTi= zkC$Fez{wF3L0`K{tR_29Y-KG&m$3}n`r1lyC)1d29iud|exabg`35WvlwfcS^ZA`~ zKB%U4T zgXP(=uqsHC7IQppwvd)C+wsDW#NkMKpBoa9DaKYG~gt)TNMneMvO z4E&gRG@h3ewCvF=3ZbEg9YG3oGmjz8p> zmb*+7$}@|ELH47h8GAWy$inY<{?RR3`N|jWy$E%-^qW8mH{QX`Y-cam&k%Ih#Nh(% zNo3tMIdHKGB@t!r@ZPB!-G8woYx{8i=;K22_*pf0*cg+4&SBVE6(GdjTtzQue+S*6 zJ>=$xMa2I6GX%*Q1V0b_v-2XI&GLjtaj`ZhdaS#kaMw@We^fd}u;b|8%83GtLB$zu)gC22I>%=gq*ftLO^$bPVeIm)< 
z;q%~p5PK(keugun)5#e~A%QK^>GZyzbdji(?s+toIs}fPF|#vh%YtV_)$Td3>*h699QuK_DOw74u21)vP`l5{4D-ktu<~N$~zN3ZgB&qFXXK3wQtD?=Jg#>3!e zDHNWp1H+Jm!kwv6_$p6Mvf@~<5I?k!F4HN3rh;qeF|-|Gs$QTRJHHQH;7iV6rZoMh z8chtC3t2G+ST+9=kvSBGl8;izTVE=8PEeycx^j{@`x3Hjx)KeV&XYK+3z&8|nYa0| zgS;*oLh>}nkyNM4aBqVLwXck(T3hajjci0*JJWJwzhv;*+lLFg=TpXk&JqgKWH=Yu zXxM6#i!M_cqf-06up`Na|MHrX{HhDUiU23nnbr;uFV*AJ+qRG$la5n$hSJNp0~e3W zBgGa&iSg)o3Z4Uy4A!Q1Eyj{kX$95~EaAO-GiYVl19+mLO}4Yo?^foCc}&xX;Rl(i zfl>J3c^+zb?x3wUDKI@VR@`Vc2K48}@}owC3mfz&kz-u{UJu3hBG2RS%y+)bxm(#2BG zLRb?qiLPA`F2qcmN?gtuU~BCRqCQxeybhSicZMBGw_h5voQ1!&Oz?d~U(dM~Ggm&B4WO>!JB zXwoWg5nTXmpOcCt0oj2>L)(_>5Bx3MZR~?@)p-y%i{mDp({k}DjDsgnr2N5qUSwa9r3JNuD<$n|*ezzlL(X$tjvcnVXieCVH}GepX=sZoE} zS?ugEw#B@Fu2Isb4-aY4ZT=VFx;tWhzd~{j?! zI~>MQqO_Cd&9+CEzLOaB(1TpJzlkI5C&BIN58%kU_Ijh0XsYr_Vb;|haLHYcJEpmk z|CSU9C#SII&Q)2?KV&TNJQIb!!!m`i;!N_i-pcvSDS7IckOhGw8ACmeljv9sB3+Y@ z!);{~YQFw5X>!fL-Xl&_k!=Y@!@P#{v z;1j#Ek3HRhBX=*PZ+G|;>q(qMu7l(3I$7WB=^EPf)B`eq8`FKcQB=i2z~Rgr|8gfs z`n>xabjNdC&v?dDlkHFL{~IohGP?&yH>ab2cLbbgxu6ACZ^9?0wG6$|$rqJ+GY?Ic zTT`+XM8ExEs$mn3+I0+MtZLAy!VkLlD9{&r4YdA1F18M-LWSaXSS|cL+j2;j{Ms!; z$AoIb`0RnCr0ycF@)Ai3AFP*F9WvrWXLsSfVmZkVZwHnyJ6abwU?lw%b^udkR6uoZ z8d-i%PSU6G2NyW>@QDW|&^K=yc#YRRSY~h;?($2Zm1SE0to+6=h@1d*2YPtkTA!#N z>SDd~Z@kCv&3t{;em+L!2Rvo$n$>ap!8XfOS5n z;KsfJa?*xE68pNkx0;aJa3?xVjrohi_EP*1hJHtrg^dTMk*<)3unQSUu@5;u?klW*Q4Td9a-nFtJ~4m5*d4nf=(voBLVxCaS=`!7E-=ji zRIlR5kwgmm z9ap3uzRrU0!fvd;=}7gP^Mrau=2K`shhc8jFj17rn93T8sRe z#Bs~Z<-tsC1NpNs8h)O%Buj)`wE2A-+`Oz&W&cZzS!;`NPdAXiY!`BmX{wOjv>(PQ z-2u%<`Mh~!9`97AK@9VCpl##=;n3!K8TldlF%6aQ&6rz2PALAg&+Wc-MKeu-?Fx4DRuuYER!v z>r1le;vA0q$(xeESt-Ky)<~#{-iKb}j_?UzjikcZLB#(OyO)-i;*j)oIB+6aSUltw zdL{|NJGS`rNzstDoJt}g#ZO=l`#GM`k(C%~vQ11S5w|}glKzeUBgC(eU}c0deXYd! z1zTk4!io*V^gtN~U@|GvDG~I(Xk*QpFW9_X8z0&}A;GV-aNeqkG*HQv_#CpO1K3{8 z-G3kX_sypCu*W!5%DfFuFJvKjE90hJ3MX@Ieu4dvS@ebS2ux?^%sOpbRG46eNpcbN zLU9AjD}-WZ(LBAV3vxAzCGhl=;oU!B_DVT#-6i{ zn2zhF|3{CsC($cUQ>2wSQutVMg?>7@6$Y5z7h2~VkhmemU;6GUg&p&+&OI?D&=D-3PQ*j!Hd7-$V;1ZI2I%*!^??IIx9xup|7V*z~zx_NK z+b>Bp84-_Hr*>j#JtOcrc(RubmH4t%o$`_%%U!0KP`^5V_Dy4-JUuf6nl=Z(hZb8X zw|)Wlm5U&HQWCUX;T(WrBiPV;TI5$(hyjrY!J=3P)>&7GKYgEqNydL{73XWc)-#sp z&P_*Y^9bID1Yx1%2n#qb+t7&nvRpW)j{A)sE43+g=_XjinNkn3V#Xyi1xSvvD^tnkfFeIuVwK=(3xa z9O>@C<1q8E6(*bR!?y+-a0u5#*!}weWm^VFIlX!A+vs)N3)G$s*&-7Ps(ALxg?6a_ z`zRP)T0$pxC|Jo-o*(7klUZi3fb-`UF$exRwcMQP&YecQ`}{MnHD zs%+PkEUB=jPSWTRqFnd;I(Cq&F_X#z@MLKnUg0}-56x^i@~v3jQDrrx>v%|2N8^?C z{QP}ipdt*{^QJG$8l}O3IvC9LT-hE|kacRuGxxaOBMimf+1u%KC(f*Xw2=(mZbRuy zXINhDOAp4ar_E)9rR+YbuqIZ84d6P2!{7C3@4Ex+exo29uJk4CX@>Mivs^3_#<9V1 z;oxgs3CAW6WyLcxq=13b$l|ym^)|f)*Zum_c%GXbn>QS;74fW>&-bx=8A0WFRmGfb ze_-y>dr-TrhMw$F6=u$^!y)<8$p83z*b=wW&rr&Q{*qfHaZ z$`wb0AeC#+qN9h?%{xArQ*XqUJy4Zs(PcQI7b-66wG)mV>_IY%8tk~@eTg8gh5d)7 zLic(j3gyp1Q;Zd0f;N4M3U{b+GZvt;pg0_Wl^mG*V# zy+qJ;I5F?KxMHk|VxMz3r9X3^q2`OFN6(M3jX6W1WnKxEm2lllvXFjy6nML zci^}MqekHG=C@_irrG<&UkMB8j~UNjIDC%#=k1x-w^(YcK$jL`9t1;@FgCGO|YTA-qBK9kOOn>8VTR+MiSdw4k-() z=xHS96ioUk%2f$|Ef|T$ZR42Vsd)M{+y~}t%2Rfj%Cn!2l)=5QndJ7`gGNVij{cPk zIPb+gw((dPUfpfXb-}aPC%GvLa$@kMVi=ZP-i`9$eoVOg57&S>S}MGxgW=_2p(*!fm*-%6f~(p#T)>Rg3iQ&ysX&HxbKC{VKCSuK|rv)BF^ zxMZK8Xk5IWN@bqRi?d4vJ%PpyO=RKE&r#pe8|XpFPtfUc6D@wYL%T*N93|u5txaT8#oGvi zWkKX){s)X!`!KTSeu_YEX7}Bey=~HA>(qXT36Z@>Ey+e&$l09sg?G^=yd7ND^J89I zQ}{GfMR?%$h40R|cCzge#aZRxt8T606j`(AF}(-Q>7UO2JJnb+WHHxVc4!iN&Cz1} zJ|xoc*!R-7+G{wY-F&uL?Wtrk)t?1kcV|0W-b??gR4{2o0_;*=6wgZCsQor;scKsj z{8JGWNx%Q{|961$-j1i>vd2yOcvww<;841~#uA=ZK9;PCmeTvb9buCl!N|y-pt(vA zKFp4h4;r4zs=<(5STUB>dL9w~?P!80I}>Qo!V0*Xv#(^|TWwhK{CLT}zcv)3At<)r 
z@y5GXXzM`8DGkkhWL(Pl@<4uGv=-ruXYg>;SF#(`dRrnU^mau(Oz^$t=;*MV@n(+M%yhWidTg@e}lK5Wb? zgbO2Y;)G^>`HItd((xK=0_{}R+t!iJj4!|mrn>BVn+;C+=!Exe+$eNj73$Y7WjBmE zkbS>XnAExlyK9)Tje655Vub~{dtHS2GJCvw|2>@e$$PmO+}DzO8w{Jc-*?A2*0^;x zi@x1}J@||g&2#V}&r}a9aZygZ8N(h`?SyEn?oy}vBXIcB0Lut;2)3~qM){V|DE5sLtDdLUXgV=xS0J`gWL)uzv4PW2Mgw^v# zv5LuQIJk^+m9u4nMTG`<4LU)W9xEt8_bk>ngwkMT2)1i8U}YM4kjpcYs&CZ5mhMBC zO79)0-*}k0_B3Pp4Q4dW-~e2j9E4pWI$}-gTl|WT8mpnWa%{jIe{A_kmz`E8BQ0ciJZC@mF&r2QbU#rQ|`J7XMCR)jDP1WfV8O%5ZGZlQ}pf2UOwU+-pf1_ZIvPOHt)*Z_;Z)T zl3~XMXR&N=CGPQbWZD(h;&$IVqWQa>QhCpVtoEHLyE4y_42n+6pImofg0BhNU%ClK z?XqW6H})oH8%O!;jodR7)Bt<8X~NMi7nC+#R1{S@meS9|9iji?OnjNG#@^r0lCGZ! zpmS+5#YAH-{Fi$kWo`99ozE#PJlq-T`~q){)bJ^;#YUh z!}WrKffI0qN&rNE(IA&PM>crKCTj1drcgXIXCI#P_lPqoU6yfuij4_+HBZLeln~JP z8I23}9~Pe%+c2@y7N+8%LMYk7Y7-UV?Y~SYyV;wa(ES7~nz1T_0QeZ?M%F1ip(HU5 zdQ2R{E?z&#_q+e_x}eYRNF(xWQG)SYRiPck;QRZ!^zz;Snh`I+t)gVMa+?XtxGvm4 z?Ow^6DWhm^Cqwi+s=@B6HY%@ku2ssLEd2aqH@c)uX0Q0FYVO@2cKB8Tt(@VB8n<{R zjlADiA0M`jnWwu6V@ zK<7}f2)%(*mK95ucQ%0{Q3;Nxe?k{yL6Mn1pVIXWSqF`?I7*CRyR?gFL0*>hL0wJZ z%yZ$DE4nhvuYbVFeX{tVQO*`W_zjugf}~}~O2I((56bWDmzz%)1f4p2ynJIk+;inT zn3ai=e~JlIBt4egH0E*-TO!$wiU(DHH`cXoDqVaw5TY-s3cXcp+50t_OzxbCey5*H z!`d<-`54!Ib_%3tfo^c@t)TdI^C>tz`Jmj~EGRnfQm2GtRWNb3l6A%}%Dx9u#8VZ4 zEZ~U?>whDNS$!*^vfZw1?$6cYgS)ywrfKZW^cvWlv`p?hJPQo55nmPF1V7IbkiEB} zj2D99)4*fU@WvZm)^UB^G+j{3DaZM02jFzMKwqa*Q$_9MS zwZyH{-{GXUCDNqpMUo;xrs&+LBIIuJ!HJ$cf1$7=HAhxLwtqSfIhRQENP4d3qSl2x+J{98C<^-d_6_^x9K@cou^M5 z+aA!o^LF@MPXSAAHptE0c%EHge_C~`2yTn-;px0PZfcwXHQ%f~imC=QuBii6eYId` z7erIb)lbsDb%A1@sk$PrV|RAv>@{(j`6TI|$5`?WZG{i-vy~fvnz1+W`=AUyNyQTx zlfQ3*$SszXn!uLJe$2$(N?~&vV+>VC43tzUi zr$5^>uzJx#eDg(6xWG)Bv+4|l^ZNeg&t7oC@DaH6d?OC{JC0O)d&3l$_Uzre?`ZRL zCSG61byM{R(CcCyB;4gXtBvDX;{66Vb2S;0{7k^8K9;U4eb}N`r{HRL{+n2^XEtJ2HTco06k76W)KnC8<7 z-aR`80lX$LoOu|A?7JyV<67uR2l>9oG>$9{L}uwVouVD|D6v5w&Zl3)p?f}H`;$v4 zg8P*d)+gae&WahV@r@QeX;qfQV$Uz|seTn|&(vnHvayfZ%sX5iGF&*W1^3>4>0i05;7 znb0HfnAqX^R@Rs2FQmn{;ECPO#p?#^py#6jl3@53+dQnW-Hem4ZKxfKS)@*3M<#<^ z+EDf&(g|Y@-<3xA9ft#kBiW_RFO=a6ZbK*jJ{7KgjbX`J^zi9Gh-?uQTX~-3`xtx1 zE-nYF038@P{gkxDU;!Cb$P|Msl^}2p>{^34Y-55ko_)HHefqB}4!;@B+ zM_aNhtJhGqQ7U#S8$=V+LY1A&)uBh3JvA!_kj2?6ZXW7mY0j3%IN*FNF1o5mKjWQP z|Exk-|6E0`V`)Q`*E@+8HgU8mu^YB%CQ!h>n@AzyB`dnLm+JXBY1TFaOQ&x}xAK9k zeUG|usp&X9~IZZUal>jX>t-C*cyOs zpU$+~KrUTe7R-*t_F|bOD#C(;=OH#}2Q*bR;r(H0@=2rhY1Ba##dO;TVog+ma_xL? 
zl5da+26OgEnrm)jfBFqqj%<=PH1`#4_we6!iY~MH`W9P5Jm8X4iH|Rzl3b2;VMizb z0*|a}&>AAcUdL4^<<4PTF{MFN^wDD}*6M7h<1#iQZG-ZOW`A{ zM7pv_pN0MD4&n8w;@Dv-f>U-ro(mhx(6$ogD|r7ky00|Yas(Y(w3VhP9-*s>1HFp* zgg19wlxw6Oz~R?|#1WU@xE=eG443b#ut%GK4z~UP53?rG^_Qw}k}X45lb_0e2RzWL zMVoDV{S}Q84@sBTEQd|cm*D1s=kTh-ZgP2QK)nL=>AayC{OejRIqV3fdOpvO{pyW9 zvfp8W-w9SI5ys_YFvXZtXw2`v;FehF+K^FV^sp*%Q3GdzYc3JRkqYS0`3F8%Jxm6< z52PHEUpS3>ZklwA$*21uTpZjFr|9dF|GqGBUdUM-ARJ)Fr_aOQ1um>+u08WVTO?f# z)S)@Cd^hHHS?te!Cu44X6x$-oVB3u?AQi)u5~Ys_3!f0;>M^o^9XO2m?AE#9Bo*Jt~Y~D_8r&dFqDq7lpI9ox`!0 zgDbP?m&<}wHp1A6hvB5BCOeZ-Bz}^sC~Dfj<(d@_Hka#^ylFP|(v4uTE&H&=?Hye6 ztsu+cMs(a;pkGlcTj4}lWF%Yk|y?gNPk}mil3z;**rDQ7?8(N z+e#S~IjShI*I9NeJB0fxxc{TdhJHteGT(p2^toiK7-+68Xw3+eJX6%gYfW-!-nN8_ zmvv*d+iUTHwINLEJwyta{uq8VPp07g-N?(NC)>(d%BQQfsk`Y@EdR{5 z|1?R`*gci@MD(X2AC^dczo{tp+If+!-c#&rdq#OrI}UEEUITJ_hj+PtH2LcRapT!j#c9{{yEEXtjUM_G9vgM-aw}n6H`x> zWU$}Ti?Z=pH1H7gyr2+$j%(A-H3Mkmq`|aemor;Z^aK2MpQrHqR-4q-tZWN zo1Qzch=avAeAYTTx8gFJ;?cJlWU+!y>7b>CVrb6-Ird*O~=*F_q$kmuU-9eG&96KSd1 z4{)2X9XuB7p*Ej?7_C2st{flBd(MkkpUdx^OCKcbp?6^8s!w3P=^Ko7ScS)?AI2@o zozV8@I-XJQ#;o=YXOYGh;9Pft>0IVDxJQUo_2~`L=%e!FJvuD!y#_1$dlbLFcqsMZ zT-Kdhs=|f(pYS=oE3_p{6;+Kt0_F$e>gfZ(hVPhOJB!LUQU`drlh2UG^1khg6VRpq_ z_aK?rYu6(PO^akT2aPGbaJ>9gLwlxf9fqBIwWIlBcUau2M%G2Wq3uC1W^-+!di@$4 zpc=|^hfYG5{i;Gj)=9jZ?1rzB9-?M;ERJfpD!rWViWOlhib*9M(QxztvA&=d4W?M3 z>9ax%PW=TZsul9d`GUfIiW6DC2okS(9Y#z`AdkO{Hm<*dle_H(#r?LvXhqR1+jpo6wcgPWr>e1CJ#IE~J2MeZ?UbQ|Ms4 zBU7tU6P9ON(rC^7;B4;0UL5U0kCLZ=%9eO|dT{|xzq|myeh9|{jt}9R;Zd#?dn9^p zJ_F5s_d)$)q|2OjIpp3iEmij#|%@gH6fDle32+j!1O_LRsF*UG%+l9dRV1w9F=weR>u} zajut?5l`Mg|GA%W+a`OKa3KjEChx=@T>FEv5s+x!T{7yt3}Pm5->z8)*wx&TzLxc8 z|CDJInPS4K+CPJ}@09Y5Rma%_r{Q$uduMiaK>!}V(VlXhp2BtRoobsm0erUg!&=T4 z>9Uyrj{J<>G=|Z>@hXD#bxrs@-4edUsF2R+pV&9)n%h8+1hA<&g%MLy#O>`?llA3& z@YmUqjIAtY@XGK4RFXZQ#urX^+RI!Suhn)WIx@ z^~q2Xn7=WsI9q~6dYSn4!bZyB`hB-lq4bUXqjc>~PxgK5L|ocOll>mBkre06Av>9$ zf&Zz&^wQDr@rf~Reqq2)|KtpG)|+)b^9EiV?oJ-xJ3~=no7kzs63dQclRuxU>*;Kg zTYhzuHh2pPuR|(Ae{U73X}k$_bvT6Kd-kKk;w;?KlPNwlInd=!9i^`OX2ID$199f7 z-co7ocGT#t#eN%Siu=3o#Agwc;o)r^aGM*?_PtHQzwech5<7Wyq4mh)%?~%rgmQT%phZcV(LdcaC7+qq_d^2B(CVKIhdb|Q`1GduCR!t_eFu)5v z9zfXH$&_P1i)SQp@7Fa=?E4pB&{lssutWoUMOQ%J_Y_hc+nM>+3G8nVH?lY-1D#>n z*nHZI%xBG@_h(kI9`m-rsGI8abcG??nI#u1kMryh2NmI4=q`DeYl5QoVOLgCn~dLO zb)s8$?s?tT8)^(w#rAWi!_MGc;@#fG{O>%!WM(J#RCca-dj9yxefY^kdROt8=6T{EZwAJp+^;|Wa;Vdb=vKaC5 zRBsk(CsTatAt){@S3|M%8~2{!zUouq>}0nR3Ji>pKOa>Ht=`u7nfn1gPj-gc20UZ# zT@QNXD`WO1k)qbUt?<3uXM7PKi+>k9guh{24^__D=3e~n{Vro_A_F?CpnyX&nQZQO zc6JX*Ohb>bc(3#`U8cBtT1|M}&xU1f%LlvHJH-LlGid4PMCFCXIcyR4rwl)O8{2xc zfXi5!V(86{P?7ONbQrc59?Z~TRd>VLjSK2*M|r7O)bSN8t&s`G;)a1wZYRz`=R43L zx8-dod{F7{MJKmC!y=8h@NPjbHg$E1a?8EFVyeeASodr*>u7NqTmE)KyX9kPr)^i- z67LQp`;|+dzvPk0YeBe@WyYd=@QiwURnoN+IHqbgrW$vXT;|S2v8az|U0Drg9r9t& zRGDx;rwF4he?h~cc${YFihpnl8$9|An!9%;%SVDD;L9T@7+L@~I+Q7+Ij5;CCyZYC zbi}fRDCwJ(k@R!09Dd$diPoo>P_vkT!As<{WT=WzU2#I3X~MPKzj}#D?en1P8FRRC%ZRMc$4Y~2 z+a#9@QYUHo!X*5-=ONhgeS?!^h~122;5RUj?J2jS zcg^4NX}%!57~YEWy{o{a+?jbq4#h1adH#sT6gIq<3R@g-k}aAYBlWE{f+-g>CAW>H zYSV7e%<~|Z1L^Ge4iMz_T$pX@opw-^Li*x2v5NH2eU9=V=kE_Jj4fGR25d+ zzoF`-sjSPo+u*OFq7a7eg1;vwQ0pdBs9PEdm--!NTI)D#Eo3=)B|EaH$SCM#{|g=7 zdDA@MH0Bw$!}1$9~IL#It7t-Mec9kz&>-N+Oxmo8+&aCIg<%)|$g9h4v4 zJz2$yk5Ct)MB%L^Q%&B(9AX`So~>plWBL2$Fo8XYsFb?;b{75Lk7pS13hvG@rpuhu z{W)S1Q+0EqLjyP9jXek1v}qHhw!2H&(?b*J8$T1DSNxJc&25(M^j8to681{V_7uSF zz52NDn-wL6WZ>7a+B7V=J)5d+giDPCy7bY4a&I=m64N5dn)hoC$;Y9(x+fcT&VbF@ z!CA0T1yIdBlSbb(sWm8{RZmwHv^o!BlPYwn;V{o@Sk#_8b%wL=KmTG~>;W`KUwSlV zpE%@f5iq)n`c_Zz-aBh{aceu}1nCI$-ExRIhio9Nt)@`?5b(x$E4F7wAe`WQ&h$O& 
zs5*t`Y2CDy`b58k!rRU8S^g9>zpSGXT{u@I`LDQ3XFFy5et`?yIZDq4{{y4QO0kIN zGB+#v{#W}Fv-iA*Ub0D~aZx5@D){r)BtR_Jdd}Oh2#W13*_X3}q1OKt_`pc27$Y#d z{72H-{Fj*F@C#CJg^ALLVs`b}3RDbFhY88a;`OUL!2dgE68*R>1~p%#;CUI`qq7(F z=WM3^5r^>68+(?O;LQG$I9K?BJ=1->NYa`)kG<4>0%~`-7ksEc*aWO(O5r8+cKU~!uMjKrmAq=KN{7{cs#pqBy*cFfb|Q>fjQPMq!V^;#GE0f;*TwdF|=R| znM8dPPicK+ktV!{;=ZKAfgNDoazXK;mNOOOf{KG(c8XK(^`^g%{b>$orrZq^6c?Xe z!wrE~#Tvm3z9iiSGffRh)!ZdMSr!cEZGhQTFOn>0JcV)ZJ4%{qA+)3BDAP9m1CVT9 z?5}Fg&iA^DeAdVY@%k<$X$mIQaSeOQb#dLkc-Zl05?w)kw)UPg$&8!P!F&*_<{H*z z2_|gS&kT9;6g9=1ES?L~u85uF`?nrRH_FVw9JtM{8HDgSvykl(NTz9ap%q(XmNv#CM*jHboG+XpO^W z&NtlCk>KLWj%>B*L8uS2Ec z_H6FkNL=%AG29P{A$4C1N>X>Fc`IK*gnJzB<7ZXcy)9Vu?vYe%I8OTgOu$ zbM%&b6Kk{Lq(7#m^oRfXBloyA965li=d56bXAD`1N*zqR=ffWD*CO}jhiTm%C8qKs z)~qH9YRay_c-Ovc;(#2u>L1G1t94_|A^m96FOl8ou1a@b`e6R7X;|WWSX}Q|NxKf0 zQJ1YebA47SeBc_I=>Fe8W0x^=h10aUvrO^BP+ej0;0(NZc^-%E>dvAL&xDyuWAVxE z`KYGOdCCJ5sPv};ylGQWG`=5)I%bchJLREhVL6L1?~7Q|Bb5&R(-#&0it*(bK5Mav zf$yq~@Ul=%sD=ZmJ7Pb=%vi}X!v^f9>(iZ8!Q!syG#Ym@pXs=IqtS{2Y0(EKcGqY* zdT$70ijfmoSU?JF{Cf&cYjNG1o{D1OugCCiwgEj`8$(|>U&kYEKa2Frk}frCi7j8d zvwb7`vLlwYT+c5E<6VA8fx|XnSCyT3YDXcw98(FtM|(4mOX*6zHbFR&yOq6teTwz^ zR)UI6otew{AvQT{B}{AlavET>k{EhS<57F-p*SSV4g-EW6xG^gX z@+bIV#qq09Di&dz1g6S8&NNt(z$?|Pi3L3MLN?x2Vei*s| z(ycWJXLQ32dt#)exA(x?_AA-u2^Lfvr@?fSv&8K+T9o52veA9MVZV>Yw6(rFwd>xW zER)i~sTbD{SnZ^yPhQYFy@vgM%6))tlj)&v3dFr}VrS;wke3)2&F1GW#grn|nz78hsCoDmt>OhegUg%~{qxOqo$WeQ`slC&y}0kWA)g&FTM1Fa+^KDl2l!vkh8b>IkhPn8 zUO%0o&&R*uuAiRbC&Mvl)Uizd_Hu(#d7u=FH+Q0+iKbY&Ef_j^&tj`HU%{hpGR2|W z7g^TOJ?z!7)3{GNi1j|XOYGasy(}4q?DeJxum%KSLeHNNwZ8~&Es-e##+G7GTse!Z z`;5o0onvP$T&ZwnIO(RJhq@wbhT;9$k^4U+qX%1I!RScbxa%bCogK*f{ab=MKVPC= z&^-(rehMPnQYG6|J+kClj>P!m%1z@+A->3i4HARc0e4eKbGB#I^UvTC)5SRR!g-ju zrz^edtRt#N_{$wH?~rPa@|{+OA$5)oV}0X8(USK!zP6l~|4YW)vQ-oh4OUB`kAvu+ zPXJwXJ;89H0jqehr36&6Afs0wHubYM>yq|IdiphlZMKQPJR?7z=h=={*1Z(}6m_5j z?cG^hs17_{;>pxKT2arYFV`R*kRM1XrD5Zi(rc!oIC1VMy4E_?|Wi*No!zWWCir%&Rn#bWb^^53_~#3YfF69ov(yPyhW+ga=>R z(Yd&5Y?99yR`?}`Do1t0`%6P%{E}$8F7sn<|0U3@t~wAlUY$J-sE6fEA~Q9M5XW}) zqh5~FSmc|2?D)}c?CGZYgAFfy273*eB7SEoUW&ep+I;4_=LL{#q7xhUWGbuk7({X3 zjF_PmNukfaND~e1B-YS|^XrZ2e&7_=+IJLFEdL@uv@KCwes%~;dj4AS{PO~P)+W%v zy@AR@?K{E)Yd4yueFU;28*#5qj1*w!i28hA8+YS7{IP!x#jVW{9krb`M0AsKTQu0n zCHL@;hN>cAx}ZoCyYYISd(*bOly7c5D89UL9TeTTU)=p1_d428)2Y2|TBd^8A2G(l zd;xrwdydOW#O>!arH89**#d`d?E8CxA`|m*lk+Xna^nMWZ>SaX z4>^ywaSu4QJcS)Pr$md?x8mI~JWDXP6MN~}pVFSo1l`#(VcO+JSZip^7Og!5^_pj; z!?z2d+uYCm3`~)(C-}0lhxlFir+}91c_CeT8w+|H)A95BzSPgA4;y%RJg)ls7ru7$ zfcsg-@F!oBJ*u{qE*hSOmUeI4HXjXQBcGI@V6RV;e?7!reWMs&(qlpAIeUw7&Op%< zYBJ3fZ>T?)c2*Cfgz6aSLTW8cEZ}~<6Gzc|o*T2|d||z#MI~knGvPw@QLGpc0W12d zVC6~&R@!L;8T`s;@@Sc&S7!y={lNl{^9+|wmtUgJ;UQwel(gbSK7O?F{%)!6nfSKBiE&B%UJTTGU-`c6WrM(!O8j7^r^NIdi!uj*qmT#%91=*y*!TobJwSy=5DNh zeK{R-h@$;%{EqsOj0W%Pq4knBMIF$fo+Ag)+jBF>A_Q@iZ7LpHl}=CO>Wr21Jc|V? 
z)DopZ%LXe*w@(PWI7S^$o&G}g;!_;ab2oY)TuG&_W|WrVLYj#k*jb)=FfP`B&EuaV z|EU_qwHTuEDNma@m`F}Tf-(MiKdOF_M2!a1q?lciQd&>WJjQbV1Jeadk z&0^?dN-rFzd<3_b*^nspWs#>9R6Sb*{Tg?m;axwe@mxDL!mCs?So0MBCIpBY?K0uS zWSLO1YbH%IZxwIc^@D$V#`Ans3)-UjOIo1I^@8s$s4?d-Sbg9bFse0p{lF)5kK^;G zpWgDZcT{MkLl@GnR#U7#Q-!%(Em(`%A~?QXgw33Z?#R!VhC7oX?EHUdcIPSjv;(|p z^b-tP`F;7imU(+eknupy&S!0~;M^l<4VZ?h<4oX0`ZGK>VYg&E!-)cvpX6V|638UZ z73+?RTRUf^<-9!R&-oBUwjfC z!hSs8gRQBX@&4)%(!FiY2A$YN#Xf^TI-My$Geg<SIVT!TX3NT@l>xg!(G7RTfHopO|KpDlj$DZ@EE8sts|r6s;S zgII#tj8?Vl$?6)P~T$QPlZ-u+ntcS!QZH z8Bh1%ajStT2o|NaDed1^GVvw9#c%w}4`JxZ)am<4o8{OIY)4yP6v?EE=-%4Nas|Y%If6!}~C9Ip#CVt~OxK+Fc$@c3) zxB33Za$Ftjp1DfiXZK<#@jZoa;!l98!6^z{U4r&?S0VTHFKErsM!hW=)N_|MtJkq& zBX8`cqA>|#iFG@6sHh$)`sks@rE{XA>tL}{@=u7{lO$O*`I3VV_g?Ri2`TNPNh*2) z@ppL+#;=!X7ARA!pL(4A+^_~F3-)Z+FdJ4_w3&{1x#F@N zDhwxUP6w%LHs`n>!IRY!#rCtkvCjZ?tX->uU9rc>e3-N5_H5_cnv*uGfGU>UG6w;CFUq0`q z%Ek?lBl`|^@;scFXMpiRI`q#~fyG034x8D0mKbw{4moO)<&Lkge)bi(xM2(}yZTRR zZ;}pc`B`mvHI9!80+i#YmGN0!UP2oM9L~#0SD4uz2 z${OcX!sw}1)LlD_q83L{>u*nSXD*)u1-P;KK?}uG{f4lY+nt#2iFe}WK2~H9&{KSy zxk#K?e38ChvS4NBa-sj=2};X&{%?;fm#!X~i};PRFNd^aDh~|kXR@FOob~_tjEuC) zZYpncPvE-36<9rVC5(=k1Rd_Vvh&ZM!YaO(zA=6!wfzf(j5!JzYOouh{E3C^>qA(d zUXAdXzpKL>*JDt=h|3L!u-jFcSfcX)Cp>s3Ki}^!Ty{%>p$=X!&!Q*kC$5yXf9ryU zA%YP6iu<2Fc0tDLwPEW!u}AeiS)|T((bEXo)RTfRX@3Gtib!E!xUc=p-Xm~JX-DnU zqakU8I}7H%rbFBI;^06_Jn;4ojO9$)vx_syWT+=|=d-dk*E+M^QCX5j5|D*xPy2nn z!DLJgyAf?e4;uI5)wnWnefSIm+&7U|A%pRM&O)R(gWA+8v1!3M%>U7g>iK-$^{5Vv zey4&xs%Mk&1wldWa;SfCGOgG*4Ya~9yG_0tOVwp|bSg$wadLTgws~?FN^bFoR5yYC z1%=Ztwdttx-h#<4sVXeG^uvig4t5gEV-?7_zY4$&UJM!KGblU|nMe80gnqid))&?XOB?Z>PE8y6M+p1g^tG z^X#apB|PVDZudzkkh8S~lf^<^5~o>D32W*_C|qn{ko-dGQ}U8fg z+CciRe>|2yQK!bnZ1leC!tR)bNM844iqGFd#VZMG>D&7|;<(^z^ro*m>olCt%}fk& zUz!^qx zNP}k&$I^9;&Lp+CFq{50xZi#u-17e?j)~ZXI**o8=P@Ukc0ma~&J%>Rkph`b=Q-^? zt#DkPOmXjxps-1|qFI_Mip66K#hZ@`n5X{=rf@tU4(q2!eGU$z-Q{KM-}v5a+qQCf zMoJ#&&g@JF&S$aw#TQ97qzpTKR44IU6y-dgi^eInsHoFs3;mDDx2}(XnV)jNDrg)m zXp3dKPq>!G;<JA=-D{9d(Fr*?6iy}7j>%U`STYLu zm->muV+-VQ+A4}?eGTz8=N`>As=)pg{xmi&hJq%3#j%|Zu@&m?VE?zi?9l2lG?Jd8 z_4j1A@UG9{$?10Vx=;&Sid1=DbPk+0orCRuG2%z=18!boB^Iu_Azmz~5UHI3y@))D zyC<8$xlxTg8|@jS?Jq!2^Ky*;+!YPK?iCvi_u@BmK_Lwlgx{Z2rHVl+ib3uhSzWhK z+}kz?J^Ab~b+jg3UEd@ISO0}M#aT-8@!P1@CV*mi&KsQM-x1%7%33vGV7eZi*aaBc zSHus4Y_X^1Q=Gj_pX$mD@N;e!HP@)os>PfmryMIca@{2r?lfY*b>@*Fs0hoJg-I8+ z6N(=f3yO1{1upjVq1vIRa7v$BlF6ItWE>wyy}8E!)~ra_z5E7wwWtcybrf{%LPv5r z-+`Pf

;!FKKKu1CuLS;(ZS-wsh_a(2rVzF@saZbHi+?OXD$A9XXFy^^FmOue(!@ zTbT4^MHF*8kOKH87^3c~Dr$f7e6(Q=6cqXhW_Dgk@ypM#`p|H^^5&hGUC8+i`(xae zyzD`LIm1D`1F*xZ25Qf_}{m#Ui`)j`fr|$TNn4n0Z*5U zF8n>1^ge*)?NFlKuXGqRSB1%={;)YmRVmf31Fas= zBHvHw!*%5*j)zb`=>&{6pTq37MpGis=sh02Q1bj_iL-C3O5*HNI(*?i##HV@KdW>Y z`*1w7;q6*Tgr4$p=s3|)o9CF^8PAS$O?mHhcgnss0n+WN_&)Ns2+z}T#r&RRJwcOh zy7gza9r-NFcp=zD+=84%UqtWsQ88F5hc{X5JLipHYZ^C-tfCPcDw@N@Bi?DZ?z)r?SfH_CRbFQgICL5(a zm%4{K<3P1^sJWww)fM|li+fFzT3=zhUorGMo~`WY&>ro1Zfz+HqU|BO*_2%xtWQu7 zEnV+UW>Zf}+7tV*IZkeDUf6xK*Wyo;JyglUFn0cQqv@a9_$e&OGuTP7kxj zQlNLE{6p9i+_2J==NNK*cG5JKXq3qeO$L+q1_}0LBr7AERTQ??LAc6qE+w2!$HXyJ zpdU_BY%k7Fp42R@w9;Uk>jlLk&jYO7y*DIrt@a15|C}71#WUMl@ZqJS><`>5d0V3m zkJxD(ofijv>x#sYp+&fQ_+{w$BuCmce-!Gw@58#(VqE?&h&rh2(m3B^<~(>%@9%`=HLQFmdrBvjf@C zReH1_Fjwq!vMUw1jbp8xP1wNa!^ofrHV7QD3z$Kvm93$Al|O678^%aic#WNe6Hhz z1#=G+FXfp>c_*eaojM!Jz0ETO90XyjjTU=!e+>-Xei--7=|fko?Rka-*ZRa>WKXJ@ z(q?B4=}o+aQKrkK&1S1G;ZPqsd8QBjNjZ$G>QUTrCKvU4eZy{TJ=l@YnK1f-8;<%l zlMdx{qAn4drP_NRK=zVNZ2PaSdHn zg@;({X(C?7&ebT7KF>p%bgIank=N zI`fB`zAlWPMjvWy-BYN@>ufG9{UckkUPC6+$9q z3>lMoDD(X7-R}?nfZTh}*?XIhZ}cjA?f2 z&+cEYhTwbltg7Y-Hdymc#LmjXfgie{!siC6;$^s!H3Ezdox@#xei1e2hQg>_js31$ zixa&*L0W$!SQdK+JvkG+IOrgr=bk`|<=&XF%Yeyp+-U*lG%Rl=iM!)?XF2aZ{M=Uq z9&UPv-_rqN)z66&zsX?K2^-R_oPpCDY+2q+72))x+hCa(&SYbi1rOfeInJU``Y^Wz zug4#TfaEafb@CK$?s8fZYAe~6JDK$V9%T!SaOxK;kg>r(DNoms`fSn=3qKshE{~I8 z_$V7TaIHTLIv9a7`|N?uqgS&0=`(5n+^(e8&iBG*oK*`t<8ZnvlFh&ViDxH#K=T~m z|8kCCNHUMkJl7;IzFV`XeveMlGQ40F!>ahcV5lsU^?8uM4joq#raLr>+B}o`J=lb2 zOLA~|RSY{=lmoJR=W$%%42T?~Bsaafh$8B}=}4uLP;V9keYLJjdL6+!AB|MI81^{Y&}MIElvXP-ADu^FO9N=Yb#{)3@3T($hW%=Es6X?9Q z511M5qFZTvpK|pkh>x5VDV7l!)qfwn8y=6Vy%zCVJMUNcR3tOr_(t@4>%~g!9>W-i z53qeqz4$k}1X2u?<+>k+Ly0Vojoi|cosHyvao%U&^!hu#qT{l+Uo^>Ey9fF2TMa{$ zGB8SI6b~ zeo`+5ex53O9v;HfGzZYotR6W2TNW8?nucbX>9D((9PJrV?|cXPQ)8`HcMTEiOf_N8yw5_nwXRoQshu}HiX)`3Ze>1d3VurK)zwe*dL8h1Gh zZ&o4oRRgG(FMkLAeJobm7DH#1fe;?A!8X-&Wmiu4VjuoFMV_by9k)(2dkkYcV&+l# zhag-$#-H5?xCx!-_Lf4vRG?UR8f(&dCP21U>Yh7RYB+ff`_1cv`|e0I%1xV2B-e?{ z&i=*q35y*vXV)rJXJj&$C?oKE!vW8JE$)LG>HecW-XIi&;p zeu;O7K8v7hHcKI`jQ5wm&1A=Fw&I1~qgi`>0o-4FTKZNsiOPEmRPuAa?9TBl>ZjTd zwR}pT_ooT6OMeUK$vbVj?8OoHSEgwU0G+!v3poCwO@G- zD}FeTh-J`gjWKjn-3qER7K+=v7D}~O)A07SA9yHcKmMC;O3{39z1$*(EqiN@OV8Sh z@9UK1V>yF#L#<4l*46bXoxgCSqB34dNIMjVM>GGa)ct8E2WaY2mV8CoD8}?Ny>(Wrsf8;x?Khq%P7^}ct&JBHP zP~q%!ri0ynLH=$Y=MBahgDXvkf^S~5^XqakZks#Aav@0W7?JgEe-3#k>m^5NCk}tq zg|AqY$tdL}G)+mRvqwhINsngCTWt)9343^V+j()vf5|NWg9(0inS0UnJY~BXes7z`k_K*)D&<)7@}k0P7}bl3PI&7r310pG;<9_=3pcMx;^0TM%ckF+C0pyH6NYVOd zr6FIdFfo&tl3OkX6LB!M`vYyCxDAf;7PTdA+}X}&7v@Kw$(FaLv$dR^__pq&IG|sP zI811flrR zHF0lD4*1_F$8R0eXkLR68~@#iRz3f!XyBguD@{6V`%G1)_n$izzfw@)g*G(eJa_QE z3XJ%*3obTkV!_8&^m`W}m41d~qGSsi3BLlk99H$m2+@LH>JbH*NII+v2xr;CPyl^E?RTVb&d4D!wSVypS0ctl& zVRtqJgYVE-F}u6KGDeP<-5qcQu1Gvfv~Uo8$&MC-PANorRXjc``U)%8yD-fTN`jWk zR5}yi1?Q~q$d3AUW{sWYvNS%kE_|0vTaV1AX7@+bdX)2NZ&p$MwUgpxOBZ&>m~-JL zQX$HQd!dt)X|6>)ec$z2blP)6aV|fa3PNt6OSuCh3`5Z!(A9$ z;E$oN?I?7YvhX^56>4nskZj-HWabY|u>Qqx_IZ*aecZVhe|u`t!0Xqc>}{-MKG=YT z90`PkOcim;u(vqTcDtzH`I+|0#iXkAN6dfQB;9FMp=OV+bSq*%jIv1)znb1a%}Gj> z@g*2NWKNiTA&?%|CR2KSN1C1Lgl;w}@-OBmaQZ|E2JY5l%SPwX$wFt+SUQ@muvU^E zc(%q*&ge#(t2sTb|+G*uvb+;>HF<|C&vTz{si8P3!j@#})7ik|VWc#O|9_63$JyKE?2OT0@W=~Q zJiTxP)Li13{PaUbi8ueO8(bNzaHM{Tzo5uG4gV`n#?ZiGoVM>J7EvKKth$7AWTzpt zItga9ZDz;b3i7ue1dbl2bcM6%hmI);{e0g;#xZ5;XNvP1*V>Ub= zKgM;E)TG@3FWfpUT9ookZ1zjp13r)Wc<`XORk;x__dh~&Gp5tgk|$uJxe*$kDhrc- zO(vg3&^lRCFbs|K8; zPaXfsey0|qTHsL{)@wI9@=lix2YN!6_sPtDijq8>pO4;l&xOqT0G6ub3$>jLsH^QdxkXY-*+n&0>+w=O6)2c z9Z(|9?uzIx;g 
z-)cb@|MK&Mb_qGy3c?QGVQhAxH}?AB%EDefrP{n}VEK6r=FU%JReVl&+N}gnh5FE) zo(V9a-#nUd-_>E-G%pOeRfb2Wb!5?2&Uk5=A+xVXJK45qM|OAJF}U>ZBwOiRjGde(@H;aHf@})l*(w#`H+S}=O*`!1)2RVZ4@Dg7 zmrd=Hwy^{1iOj@qAS-IwEH(Q4hS%ZUc(>~_247XF+d5q`Iv~i$Z$Aek`ZYoAD&FCp zc7=BJIs{sq)`Ra~72#-6t90yjH!9C-LY;L^;Poq}V*e;XZciF$?lT$OJ6}|6<#Y4S z726PuIi6%5P zVFk3tcY{s*jvcwfkqt1hW(RIY&~kpivot$VF(g8X^>>=4IJ#yb+vd@Oy_wye&3`zD z4()7~)%5&|nXCj4{7qmjJ3HZusY%H6(%|6p7u@+K2uEchZ2P_ou)lmCi=Qx*O8xz) zcHuSla_~&5qXc|=eTQs=O(C9c*M`w6|Kk4B)->e5j?C$ICiLWF@`s=e)}$*(@jN){h*S?9+(to3%AG^S57{Cvkd zM&`;XsLYek0{c+T@M64r&zSknRTA#@U5+cetb#H5f_zeVu2lBaN|x14kgvSw!}fmp zfLHnN&(?1Vd&2ugK1O{J&5HL>dF)8qj|+ zE_nIj6Y1tvUDocmf;ytJ&OMYRbJt9zhc$E1;XSGU+M%MlPWq$Yn~rSRKo2;7 z)u&QMx(8Ii?!|ueKbZ*)^@M6oA@Dxf8QyZg>ac#c(sK`>gIj8#YVHbFW9TNf@0x*A zJ4Unp(^I4kRtkt%lPRrPGKD$b|BYV#mE~REU&fhjUCIB2Ah+drd^`1I=+1i*8!rvP z;o)Ow#X6DIn<>kyp1vW!Nmf)7y;Zcyeh)@nlX*9#E_>JUwYas+10Hv(0BiR-@HRw~ zEJnLQQ(`5~m^6c=O?RO6ngQSOSfkskV%iYBgI)y<8Qoex6T%+%W1c2=r43ux(aq>g z{5!*hf-gqWqx|9QihMPm**F0m%R?$g3QF?mxMXpdR+V`CnIKmxoM$7pVze-s>HcYhO*iJTf7b=k>XZ*{os>$O z;ySU=Xx`EFMU|RoN#Z((LE`DVA1cPUsR}N~BB}YICHuzTQ_g%Yqr0k(EV-*`@37lA zQF9Tzo}Ujddsc}_f2`QhiY+*Mk01zxSBUdR+(qDixQ6@t*nf&_HdE&l%k-K_XH?%y z+9NhIk5)alz2%0OHTN*Pyg8Z-8yuwqr~Ba8@gn@=XQp>M1E@Y`B`SRy&PH?g`Vr6M z{I<_Sr|wFWR+=C_*dN6-zjc=WGkgG(_#Gm3pF6!+7b&jg*}aypTcu;%fnYV)iQ1N> zQ^LBbxFKf;v$6dO4|~Rn_T7}_1MJ~+i(I4scgKZdny9wHB)oAS2 z23%$S8~+Zw!TQ)v#Pwk|@U`U?W=r8{aWV>Y6HKY=sfAeaUnl0i=n_oh{^zkL8?e`B zV`!hpJG3VYg8I%lag6Fe_!GE=$`>i&!wE;hc-$n&N#>joca&N9^Da%R@V8QE5a^bt{?YJ4?3j?-kZHY@1}J zWhJ@H2?s0s0JZMBFlS~B2}Pfz-0__lmv2g^EX|qnueJ2a@|84sp%P_Re3gBD-y*9T zpU-?&334w7L;SkxG+Tcv7SHQ-5-(kP4&`~hC?JSCQH=+)8P+xM^w2%fYwAD+C0+4t z*izAlKl9~2Z()Uim~*WYdzoexwN6mflVx3 zMl&<{%y)_(rFNZ4vY8KY)OGIPQoaB`|Fnu15*{O@c+!@iQ5d>iSw1tug%$O(rWe^V zcG8EjMNcj1PTnvYH|;DLuNg^?MoQRi;V|a>;yN02%aZ)_%pqjZ3|hHtAT7pRNKXEZ zfqHvoT`cq{cS|0brbUwOvxB&%G8MlnRkJJ6g8cg;JJ#=AG={{Mh?$cG`8mgD7+92r z+RL_L1B+t^CY8hVo1<}1k{_mUaqfN`awCQ&2Rbv4d#XZ&b0?f=u@{Z6zDE1T`Lt^7 zG0Kc`WYpt17@p()+WWu7Jqt|9g0osDuN{LMKQd(+CO2T0+Zz}<){8N}f5mRl9{z3v6-;n6h5g!dfnzKZGwrMPwDc|5+u0gt{kpeqCUndIz1Sbeuq7WGP5 z{w+R+^r{CiM}9AUc;%aH_)r7p8|c8YHvYrE2a<7iw+t{6XH$EO6D0NQhU>;0kc5>l zrBB=IK`*`+CNCNw?V1GSRCpgh`)pAt>8nza15t2R6#ACtLQs2YD@h1jg}tbZ~qa zY4>;~Mz1%cWucESq}-p~s<2|Krz`?yb{D@Cj9^6rgIH~P0BfuYqI3M*+kf{zQPEFX z9%DU)9w{e+>Vo$;;Y~JMy@KankDg-Xp9i4Ow}k1<4TI9$C@lZ25W7D;iGjLG^2@QW zrHtO*pg7Qpeh=@;TCb_n*2ZzLWnKl9{!SE|EtAQ0(@^LzD;1l+Z)73Wf^ao)DxQ#A zzz6R9ax50)7Z-ehqsfl!gdWc=EblArzI2S8h)|OM8WazC$*TN)+J#-2z88-eC(zfv zEikdI6oX?@sMny6SlL-Y`>H~jbrdm`R4=Ud_Y=<;ou;_w*0e$PAI_;-#IAn`m!C;)SG}73@|JDT) z*xMePqdA|ZC1sxK1AMU}}vrxFId6SO+34WxOgCElyt zIp&49`tu#Ik3X)cJ(ONCvC~%3t2G_o9x-C$78OVvg9W*Ng$Ip{FM}!)*@04Za1CktD=X=7_Z->#d1bJR^LD<@2Bhmhn>qjB z+3zXb-`YG-dVDhl7pvXIPx^gXO34<5Tdp7Y`VYmL7ryApxv&lwxWoG90QR8A0a6a@ z4qOK;=Fi(r9+`$TX-p6;isab{-|py@znzk7V_EIO!R){%JyKs9N{g%?pdxIzxGCEL zefXK$uhTbJ#pk3>TU6xEw}V-({ym&OxfA);7gCoE3D8Y#Dy$0-gl$Hi)N}g^iZZ<6? zN{);M)$^QZJgmbergWpN&t_I_%JQ)2$FT6YE_-}70ZU8fkbUMtnEx{wD(u!! 
z&eJWp|H^zg8Dh!?q%MVR>kXJ{k0H3@gB6@VGKef=CF<|L8NGMbD*Q%vVow&lmEZ}Y z<}Cq+?_=z*Q5-bw>Bv$!`!iUtMM_@wo^DNvgT?J;oH;e6@zlnV&BYXU#l`;z|$8LXnY#yrSTpu>jUgF&zv29U4uD$o!L0N1pNyCiFzLd;jsJ_ zWOuNl#+R<(nYo_H@9)OLeBSI6-zp6kHp8i>^Vv#13tDbJR zo9!QJF2z*)u=O$}IeKYQ+mnxq`k5!`(vXqNEn^4oY1l+nX*rOc@{>jSwBV-eg%Gmm z4tR%I!=nMpu=<)Nn;A0!26PhSeFnY)C(l2k9{=qONZ3tg9n#^z&LgmEsvwm1)sRd& zAICrTvti)i$9U{-U$JJsjkqm*Cmh*uPIQ}Ck1YX#_-nl&FRR%h9vy6__{wK`r-pNG z?0yFnWU6eIYcX6O!QJlb+tBHY5#GqMqwK*B(!^^#v(t4X(>3hK91C=)zNi7JUlw5t z|4r5HN}#@$ec3o&W#RWZN1AKSvyE$&g|2)4h~IaN#rp@}LfE)^(JN~8?wTJQ->SAD5o|9rq@|vaq8r`ZaTss}W)DDki=aP~r>8v*AM=U_e)r9V6*+5L? zKB_7jJvzNH62DK_i~8EjSd@u2%WzysAF5BGa$jTCZLB|ZC^w-knfdIratj-Kz#Phw z67bflZ(tO09K3nfeP>`B){N+Y>t>(8nN|ZN+rG*|rJgD8#K{p;I>bux%BOJo?C#Ad=FMVp-?5v}!+H+e_H8(u2|Pch zKasq?brA17Sc<1vF%9$2fk!R3`3&qkMp_S|8B7%hzFG+`du^GUL73uL;Yu*g{3mtZ zwngk&%spmd%{alW9|dvW|IRP_rJ;P^F=z2E?4;ES{^2UnVa8Gv%XZKPn#Xz%ERgbz z8>Pv;im^Vygx*_bf^v^g+CIJzI`urriZl}NLt{3p%6lZf*_?*@8&sLvu+Dhn(slIf z^ATDd@^Nm7Dm#4QF?)3@Sqw4?W>?0?OE&yjprY?aZtuhBz#rZ_prS0V$Wg#-KI`c; zemRW4Ss}@?8qw58kgvWa$BXOhVB*=svO9i%9p>-WkzCnIH0*CdyC?E)iCcpF^bC#4 zPrnDV2|>3+SP+b4K6uu|jgU&;8z(8?b`p&tOFxPta;!B*k z_q!nfb}~s^ePtxLujAjBt15zf#B!>swG$Uu=0P8qaTGKw1P7n%LZ6oWgI{Y>iB&7h z!;3CZq*^q@oJxoGq!?PCtHx$j2a!&G1Zc(X6%TaKq$m$xad}to0ONNkwFWSc_|G`1)sId5^#>

g#l8UEx2gTy1%c6JdXe=E36$@Y5!R91EZsAmg9(%5fCl6gg4=YFd z>be9sTuFrMw2J2EaQ^SRH8hR%AW8qFIM($ETX%Vhw8%ON8|Nwu7M7iGOSAyyPrJa& zH%`oWZ3zaLE`|J?oN=&mMx`mQ!J}cT;=>YCHsj!J(Bpiaw(Vye($y2C6>;40DF{j@ z0@$0dRPc9#&&bTiW-E`;L8r;;(fI=JG@qzwi&L+o@Byf0-=$iviv1 zoC*#G=iqK+7{{HHkS`zrb4^d$APRKze)9 zMVzFmQscO4&@YUY#vF#Jf^71M$c<6?sBg zm8_tpkNBo{5_t2@kk(EjrRFIM6MlDxH7n=P&sqFl(-gFn*@3@*Y-?$xRS{SisM3$|#jyMRQ&8v5^B+gIqTgyn z17FV5C+mUoi4*WSPmm9tyOXt!+l=`_5ps`aUHu+*2MvIb$c5G zl=04n;eQn&jhoow2R>|ZTLj&WUJ6arlP2F#p)DVyAm-j*3EEoAF1 za)-o`wje>)Qnd>kwd8Mle{ZW;@& ziSf+)l^`s+md=)*AH;%LH8^K&5ibWD^In%zFy>)ih5oPu(8L`Ey|0^rVMjG)msJW8 zCN=2lG=|;L443Kjv1Rx6x=Q!DC$P&Q88mls2YB>(0X#6+2F_fQ?t0pAk@whll z+G)zhcHb>V(Pun6>J_9N+zOs+IA2`%1$xJC<2zXH0e$fnzwPmsrO&Wn)2y1Lui^1H zV{{7+*$coTI3u=8U^9<*q&&q<=+@yeuD|dJw>>vwlZ&-+cH?2}k*`H#H$IfQeZCBa z8MA1(W&*iY_u<_nrP6?}%c!~61~$ywjIFWs6RnkNq~CT6#LIi8|#YJb5;;;eJH^K`(h}Rtw9ZgtzexqopqnKj=i{DhDrMKVWwX(%$#R}Tesa; z^j6!66JP8=pYYY#`b&*{&Qz9{eA6Lq&UMULt15)?jxtk|Qdr)hBkt&vNvWFMMW4{g z>~^CWbMaH69nITuQlpA6&c=nj4`t$H;trT@-^D^pb>{w+&p29g@JM!&Ls{SHaBPVg zU5_s0{jH`<9_&NY8}~y?&qfrDcc34i#YRM*#O>DCaYkw$T>2siRf)>-M_-b_S*sqd z%v^>pvj?F|+$CI?*5l6;^si`|3BFdJf^ePgG@ASJ8*A2J*m4E- zXR^RHdx;K6q$+0vh^O zlHNc!8WFAoCbyEE5Jg=+dDd`|rm*%Dw0WtCdeP^W~diWx^M%4eS6K6Q?OYmRREw9f5|cD$8foZllEW zcSZLN9rRS5l7i5o|9h6*H_^MK|A|w;m4Gp8c z@ewd($ZT}$RVVhG(1aHsTZ*yuoRf853+JvDqtVqaG_wq(=K5&r<7*Ep3uKTU8w^oD z_K2gd@X!C~f9T?+0YB5V6p^Ep<;~y5!|8wh$j*H)c=ImHTHYU0_9zjDjvB?Lr|ra6 zM|jc}Fd6D-AyoGzxZLs*X3+3~Jp}ygiwDC#+3kzxlnkNX!Qzt00ZnsMQ zX^xP-#suaxNvQW<9BYZK5WhTp3qN=3z_nm2=<0)$Lj=BA4fn z&+LMXeWfh2pBZ`RT@y=cd|2KoCozU+l`B@vg48$0xc!Zar1%(vA#K{Uy{i-47N;}Q zO|1~EISo&>KZC}9s&WtKl{74B3s{~>md?J`$5p$G;L^iTa9$V>H?sc1+vrhLHpqyr zPP(k{y>da)NLCg$SVq8(t0qjlz8P0uj>Gk`tFVpxVt0p_Skq1 zM4ta`IzNgftu=wRhI3?5kwW)JE5y3=8SGK{5EgPZn+|eEp8kD1c5#CSIA99&wO_*? zWmU02&IF{|y0h%W!R$_%9xmt*3@6<_aK?(W5>xt;Yqwz({Nf;lQ@!H1$^y6(%6Gtz z_rv1O0Zc>lfHbQ~&ZhsTfQvydVZME$V#Q8n!DwuMwA6KBLsi~`1^4-8j8KtxvzW|& zai;uTPkZ*uwY&7Y=mM^I{8>CRu^D6caKE8$n>71H1O$ypfa;hhcq{z)ZRxb+CU zu9<_QQ!hc5p&$QljYPv@Tk*h5&QV-?3*W+G@R3!l6lhpN(`H>&^y8WKId3Px&Po@& zA?Z;}^*S<_l;p-+%F%h<4hqOSOxM1?fbEY0*u|^@^t_}lSw7+&aD3jJGGPdIv%Z1V zyG_J8r8t%?)1$OI?lgQ>1i~`ldF=q6<k~8FZn4E}WS)-$a4YC&}utlDrFd2>)vL zrq&+nptGld^{_k*x5g)9rJ0g&!HD62oGCQM{45@O9>M;*u^D4Bzk|Da49<$)M`d3- zlD;CAU9{{+&D`hr{=zNL+*OWG`X|ENsy*;1eE=m-Ys6rafA}aPS#sYqgo3_$8)Dtf7C^7<6f`a$FoAtT#U(X3t9mV$Q+^5e#vr8g~Isfo{3^d%$Kdx9P_ zEftv0ulF!$ouuf$OPlTMs3VQAeghLio8iSmRp#^Z0_yQj*LTKerI%W3Y0jd~R5bSv z3|3pu=hJm8KOoJ1^;h0?xcfN__6ij961X$TBp+_LD{?#(3zXY% zZ8PujY2f$i^CfV8Zv@`fY7{qEy%+sTj7fr+m(mUrkx?HSq7HGMQY{is%2nb(esS{5@sXEjz52smH|! z=SyeZ4&kSzb--q@9FY))a>@6C!jaArIpM>+~SH$y& z>!g6Z4Om^&ojC>Hf&AB2te>F@eduE*d*HqRBm2ITHur3hv`SN{U{4g&QA!8bAbzg2 zwuFbi7h$4uCv?hClJEcK#H{ObA)u~1eRgp`jQa+OvwGkcKGl`EZlp6;ws(g59LUiF1(}D_d3k!;ViB3C}5DTkk)1- zQNss)n&_?$4H-K?zcil3ZP~=;bp3|c=U7s(z9Ywc5+TAa75dEW%e!s%vGFZOu^?b(elAuKLK{)CnfFGv_d^v} zCiGxJizCkV7vvX)%kX}GZ}dLjhn?%FB5xjf7PsyCj#@T@S$$}M^z&mIZr~2kLYpb* z)oC4dwhqNf4g;jqFMG0kU0%TYoNCFxu@dI*Uqb5)gYcSDH}UbBEflwLnaGTWv!C7V zaY*J^y7AZ%e&6oT%=Xp6%=r^3@a7J&y}+01`#x8cR@Fc;XERrZeTGMIiy&ppO7@(; zfA=h5FyX2ptlg+i8hqZO_cITVUFrw1lVTuZ&?PY1x4Jy6?gu1Wg|ZUwu^7_5lIf|F z?8ISHQnulHNY`ZMwy8UX+UcP-&mWCxEr*z61$@4mC`C_hli24|bdWrusqul>*rxy! 
z+N>GvNX8>)NLuipAS_z*Q(W!CozF+ArM$rNQsw-!lwIge%R-cdiC%qi@&^^!ea9C# z_S00X>eZiFWgH;gv&ZrF{Y0>Q^FbChZXBN2ttyA_V_2@4nXGWmHQ6qc6OcHeiL-Ra z#D|*I_^$h5Sx>`!ToQ2@wx|ukd#;Ayd{B`8up7^oZ92vK439B`o3`<;6Gsu|56}ygVO8RfVfT>wPID26m$jGpv|V1d|SV z-o*LC)yQ^fANFj~eCQCooThk0(4+%bM5ixLU~8TwyUDXgA^Fp2*=c>6&d+99zjef- zQ*xHn_ynsLsEHjOFNbfnc9gnsF;+F#Lnxa^Ag|)=@i35`D+0%lJHVB*RviOs;pO@O zG&t9WUDO4+^M8Cd>h?i2;8*tagA1AVuxNJwrzUOU8H{7Q7{qvn)A=+%?6AKcAJzRQ z&hOooT~>if73Xo*+zS;m+jpT^wkcbbvJaBa^@0x`{4O@8 zRs5FFpIuq4EU#X_7v~a(6t)e6vwFv{I$v3Cy~T&6R^~}3{td&5+B8_cW+r=**@3Mx zxFJ<~D+%hUvCz8f7Mf`W(~Rr)Am#?&somAbpW9dC0WA-((`}XZ+7!Xvp`4=$S;p>0 z3i350BNer^a_Uj8B>b}Hd*1(Qfpy!!0=9YJt$~j*&TTBuz6E$CzOgmuW_Q`;qZ};MX z->cF2*M6B>W+=?~l@39RhqIMOEm^-4fAP%v`wjyRWQZjmTj-lOm>zrFk-9&h4zKlY zOMSFmu=UYxSy}5ocCt6;WRE@)H$(`?0ayE)O<9X&vWLp#H%~R6js0|tGj5i3h#X5IryzN-r>Px z2JGYKDXeDmC7f=w6k0+@v2*8(VUfEdESH@||Mm+2#(mgM|E;ua^;A~kpotqA=8ERU zs`3fOU1;N*BkY!&AnfJ&4!>0quxAIrziwx#KIA284a){usu4}kBn;?Z3B9(H#F%G; zIk$-)t8s6!4n9>UT?|UWsdZV>il0ZB^9m*Tp`~YF`ukjZ zx?M&7)F7V1ji+OJ>mT@W@V=C0P%AYyXkd@*YV7fGE1sMD8q?~$nB>?Kt^Vsu*WzAE z)#iHA)#hu8NA`O7JjVrpJ{IJzMY*VO^1%0;^IpSqiL<&LV@YS@=@%?uC)PxXAx)

-893pm8rh9|BFg*dGh6Ol`WO2IY8{_rw{P)yJL9ReYjp;Df7zJj z-u7f)4mg10tM5{_%?fdI#$iRP=|ndAWIsChzAqa!OOOX%@MVuq9EV%En9*iwCZQ_ujX}?uHNWNs6-YDnuE+D+=k9%4N*v9M8=28qB!z z4!q=h-JG{F^6}gSZl-GT&Lhj{%*1TApyW0_-=qzpCf;n|4t2W9XSAAX@vweGGT4vk z$GUw>r7bfh3|ni1NBvT0p>vX=AJ4BJ%9dmDEJs#0hI5CeSv0Q8Q;d1Ei&g%(fse3R@TRx38;6@6bF_H{4$BIh4Uv%)kzBJ_WXz83W;qJI)c(a=({5pOc?hM%v zul~Kl?vHKoN@ZUdmY<3?^FHt%u6a^No6fZBP!Wvop(IaO+XI#!dn{(%+sV2n>tW4O zk{2gmKI}TuP7K2UM>D=@y4{G?znEi2KM~jJE-Rl*V230 zxF$S_zImxLtEP3ZcugQaneB(;|2=`C{?pmT-BX~y#dz*Me2iVz{lXa>wLBG4PM7xG zgwy-az@hh=tc`-dlUTh{p`iDG*zAo^hr-3&j2&rb{Vu>Wq5it6CeQ%$ear`;0{9#$C=0eIRif55E z-gIe22JFsi7RUZ`hCfz^`ETo{gVX$eY|WZ)(*4LNNbIP?iu-Xd}{e-|kx`s9H923^{a%)jGJS>pH`k>bG)o#~Xqk$qFzgp)SkY+Uho_?V?4 zx3<@)9Cn^R=RB6Ot8;sh?^sKP>zDsgbRPa({a+lvpATg;P$?s1rcz2OeC|0a?b4u7 z-zFsuZQAug$R5!UNuhuPO2Vh0V~p=SxY) z(OuR4EElzEfr++1WW0qYR<`Luw{7^&Walkfn#!^z|01+vv#tIJ{i(1 zo)`Yf-{V(7dLm^;Q2NLD(uu^KoO4nkEj1j4>-9K)W7a-kmv&33L-`(gm;LAfytgxM zHhcL5@Y?6U0J-7#+@Ei;#7q^H{H#1$)*X-gy<&rT=At4v2(yCfq>R>f_$=xg%aR7d zj{IQME*cH9lJ{Zk6Lb8ps+fHX&SyJX1-!EH2>dv5584gcNInO+NB%7Tj+S%R z-NbyR%{j=uMhQZ0|5Ud2hA!Q&>BSy|^`y$XhhgLMb}acWpFOpI%0|cTB>SNW5H)HV z9aH(Tq6eMv-nvu6U!@(l#rqXk%S zE`Ys@lqtve97IeAeP-9B53Q}!!Q;=FQZb`RjCI|P$7_b;{HA1bcWOt0 zr{0LekMepbs6{+=_9PrBO@?+cYp~}8E{q{ zYY~kbdp1df_@2ks?^8h|xsY9J_g(7V=71uT^oEC6e1Zv6qFOaqeZRJ zr`Uy^*)EAsPeFKce^&VBx8!gmkGlSxP48ytGsTEhsSk@4cO9u^8qdrze`C3HsB0Gn z1?7-;`vxpO;(~nwy1>yXR@BEjh}YYld7HRB?k%vF2JktVo1O`&HHC?t3^u@vKHQm} z(GIt|d=>vT3vgu-?|n3eO8-Kuq($X{;@gG!(#sVkinV&jFrfYluqlmjslyd zc~BwR6m=rS#0%og%iQtZum@IKZw1x3wbBW5zN^q24b7H4$(Z+clxka5CJ&dh@<$#l zH|~@acws0#dGn6a^YpP#`52n9rMGnaqapp^nZS)BX41Oiuk4-qEGXLjOM0}Zk z7gG*e(|zSWW?Q#h9x-u1?xweM|{7!JbUmMbEnn^mxwJD<8T#-FDFZO!&3o{-E@FjFcT^z}@nH7(B=njw3W8tE zM)f*Vyty@$-Ed9^U->7t@2Me8h)ICyD@(<$vuCii(cZZF-8LLjuT5)t*7}O}H?h*d z86f2tReR{+g_BdTNXI~B{KXE#&I5VQY-FB^ov3f-0m}40jZ1ZBVI?kYIQBs)tw zvbGHR8q0-7eg{Ge$i}du3Jt4H=W31Tcvd1 z%@9SYatM^o{lG3y)xjDMJ{#;|jUmS)uwSDESDprZZJqKfV?*RHYZynn-a6b%v+Le8YOksgRpV_rr`yS3p3cwTqddEDXZd9q6|aNu5+l(!cRwtdbB$eHC{wH1@8!lxKQQ)i7;*0=xd`n`Aq(05g-e zg7!}**4`-_{tWAkTZ*1a?@T(8s@xWigt_8?|F*&Hlns=!v49QHDW-{~wF+n6H&x{r z<39CJIK??#sg3o}3LbdUv>tXP_NMA1nzS@v5A(noyth-t27U==7eASxHBG@M6>VJe zJmsu0Gn}s19bgG3tE9`9lUV0dm!&C7H8Ag(7gDx@EKe)|KUrHo!?-V{6(x$9VkN}Z z&jO=;&tTrFn^@gx8uykTq?$c7Y}Tx^><*vDZ<)Y%*-dXlRV!zr#bofi-&<(U9o`WI z>cZuOtK>c5H@ok41k?woqLy|E__t|K3B9%GYQjSZxx{xP)c>%r9c8FprAfhU3RuU~ zS?JV=iXa-#@@|jD%H9HLHVwwnKW!;+MqeDeQ-TY}`rxf}&MnU2wY^m^&gmY`J}wo6 zpM3>ohl(9Im;23jn*L^sj#$%;BO~#HWijNnS1AfiPK$jz$%Jt~hw*-5M?7X4s~Dhr zgWlVDsn#^iAfNk7Sml?MqA)Ol^?Tf%dyG2cngEHcot zmzOw<&ve{3o{;{%P!sm^9`hzX3t8$I&njQ#iYwZsNIzl_)m_qP*&9J{GBaiU)Z>}e zBR+?fy%fi$o6zac4bZWr8}_+2lO9}{0C%i!u@?vCV?(04;M8PKSvzPtkErJIR%1X zvC@k|eGkHn;C?LN-*&d+$2Rha;r!>h2iW=AGt#&yGc+E|JxLyx>|0r#qN(!@1u!gsT6Y>ab^^mF)Wb|TN3 z?r|Pc!3s^qtSQ4wg&m3Gn0)an z(s}JnJ*RwObw+mBF33}|I&qrmmi!Y7D^5X9VHuPyE`e^NInUbXC3Koh#oZ$S|E{W_S=e@kVJW|>g>4b)3UP)aFilyoG zpV{oacUZyPaPTO~EZgyKHlCz?R3nRHV}0aw%E}qPepoJ5ILL9qs1f2cm4@rSJ@eQX z=eZc`6)t@}WJ}#tYv=&K<30FZK|{22$#`u7T`+dXO;30h)Oi(+PccJ>TU(gC-imec z?TY#@y;wO0GUH0QQs3w(9A0voou6F@!JJ9{R8xkJ+j@hQ%V}CPD~q=3l+vX}8E&8P znH_9>4@Pt3!s-`&Xx(vB%I(OR5#I#C%gK~|`lbezIwx8Qh8d^5`Je zKl~zdN7M{%4No{tAE=!1Gs4g6-X0OuH7WmR$)(ZE6%dyEt*LEDdwI&_uJ z={B(;{2pZJd>?fNU!!~ctdsX*a7cRs$?b+wA^+@XHt&KjN5|mvm`>z!vPnva)yC~l z@~~Dlk{R2)WJ5VOUNtYBy}Z(kJPYOQPvknfHEcS)h|GW$=koASlZ-yqePpWn^AtZ~ zGnwy-NZ4%u8a@Xdlz#NqVqpSvcDKYQ&<5w9-i{htJ2);n24_Bxr?*qq^TSD9d2&H--1vM6 zycW&avBG}%w@o`dlw*J;_YBb7^i`>%d?BpJI)eI76lgWGKYE`lfCRrPNXodxYz}dL zte6d*8bZa5yE#X+OidYGq>gVKw5cb~B>AQF_+-R53|t)`nqJ7pmFp|fc7sgV`J^u@ 
z+m@Am$?8q1bz4*uX1`+k!waORV;72fZ6sQMD+;TXBXHt|Wz1s0T&d`gKRVTXhsSz( zV3D$r_Gr9=Kcg0te&R+r*Hw-c#(W+>K~PS(@X4z+2bL9I1KL+dVw{Fd zFd5yOed=4qSt){YVb{m3d}0rlG~ync-1JWDs+CH5FL}mi=m62JX9cVqCJ19E`HMoA zcr*;+J$i5MzmC1m(nKrDSX|6fTDvjK-_3NIE%14V5;6|(hZi$%!o>r_*s>EtsqNOG zpgSy9;b-L|4%Ax(6F$trXwEA044#4(UoC0fN)e03%9PdX1?E{B2=QLsX@GJl$ORuZ z>cJdl3W9LZ;Xk@kzLV50IkHKqWm1IdApPCv!yZ>R!!kFSvcWb7S{J>BozXfp;pYt~ z;!Z3by9gZdIab=#Q;#}5*sln_tWAB>=d$U`hT>4OzHG_tZDN-xUt#e{b+Y^3j_9q!W5sr*g7W3|b~x&J0-d*9FHJa#?%cR7uGLfcACmhadpt41+?ZZf<7@GESn4;76E z*y3`_P2%9Y7nqHj0kMj|YHjz4XEfjam>W}X~b|b|+{%-tqhQBH~-BKFD{8!zm z+ps;Lu?Q(~x=i4KF)E*Ch*QplQRi3M6!o}BN^+{kJH4&Mx$UmPm-7O(_W8pOFIN*{ z0t@NeX(ii#>MArE8sl&8325`XF9^GrQ|6by?8m(x&^xOHs`TWL5j=rD@?6H%SzXwM z@9#yALBrU&h-K0MgQw!(N}f-jpCnCc<16{j5riP85|@I&X*7vHZ&7EhAzt@C#F)`| zZrMOgmKjoy;5JZyJ6Bx!eKzT@x>ok!i=3>2FF|ueFIKfo#pVbBZ2b*q%K6lrdoh*L z;;!n{w0$po7UClgSILBCHknP`xeA*4IaA^%1Dd1}Nq1bl(LC>lnAtd=(iS)%{Bosy zlT}n{as^*@O<~#-xc`syI>wIPO#>RjRA)x> z;{BQVHuS_uk0CT`?t0diT-bt!6!f9?F#1xNC>$S%7Ioa!vOZ5d)x{K>{#AlqpEF|c zkZa=1niNR=!FyAYpTt^yYu37D0vj85m@RlH7hJeUVRC#99Q>(|JP8lwJWIA=?+6-x zTMlUvd&JZA>Tu_sISzc*ovx0X3q|^T*Nykni$uv28mk<>M|kW%>$;YCXtz6Es-S zTtlqzdLg~MXGp(xq_f}g88AafuDmc)C2G1xVKUDNOghsMPIpV-->su~=v_7Be~HC^ zUIpxM?NIh9$q>i&Rp999AH^Le0SdJaMJ%C@hPYZWo>hB%GC=Y_qOI~DStd?XJOrligI1#x#Bm)ap{l+!y0*8M?k#F% zCy&aMMt#+l<2b|FG={tX7RI1Kjt16^t%LXcc~Li>4!_4#i^q>|Vbiu)V#=|hnD9kO zGhT7-yStM0zp5_yWgAn^zLjvTL4&&?bAg=~aQrqaN^V@tK6zMhhUy-+7$1UDs0I2> z4{J zA?B~`jy0k7Wn<{%NsfV8q?i<<$&OF+w61JTb%{mE!e@D6Uk1Px)z3ay2 zTs8v9cnFSqx`6Gf(5K{od|!K|9{ciq0Q#)v%#v2kMwtWUBD&E51)rDwPv3_R4a#_-LwLzNRz$}wSq5l2zu+AZtEwoR9WxRhl#lQoV z0Za^7_uaYcs8Ot~#QPb8ba7Rz1|`@hvRl=Au|;-3%9^=>-8g7O6~FZHy*_sU*&Sy0 zzU*e^%P)&x|I25qf+Q*9^K*9bK^6NLJq+C){-`SZFJ(9P-eNHWj3g7I$8?FN>tqo9*- z)#&BV6EJIBf0{O6DE#Zafo2Sy26nEOpzd)!>$s*LxsTr`9=Ko3?%G8%S=XzqQ74v^ z)>Ckl8~2=VnglBy9cl2WII=F>h3DFE&*UUmY0~q@l59>PM4T0rD}^XFSX{@1FL^wR zZ%Jwc-@@O}YzWj}N_SgxVA|8AcyIj@42T|y?1GHDYkT6C=yd3sb5p#*xreb|o5kY3 z3$cHn4dT#LfBKsH2&%0zD7af&$o|WjraX5!uX{&TtkYh$`_Eqxy5%!WJqjf&)$stI zn<=IM*^OL+5!}&}Y97t5zTbnFx-~NM8lH<-cYw985;5*iI2?KILH~YF#a%-?Nbx%3 z*snynm=c@=qB2eTZNuGkUAdphzyzb4?@1X}XI0;ormUpV4xBlc{zHe~kXLd;iu%uj zc5GjPKg){Q$lii-Kx02v)u$19Z8^xFwLr8;JxO-_PA(fCi2L^VFlQC_r{*cqr1LW= zO65-Eg(JXn(OYiNXoFjD|yd93=_{SBF|ueTif~lGocvvuwCS}AcyDXb}j1{e3gY`3iRT+vAZ2ML!pNU%C z_u;W;+e;#U~45)_V-$DX$QYJ(uMUbV{lyCH zz0%~??~;e3mnwQ_2xpQw!j3D4*_OuR^wDM)4(8b+AGsr0hi4(aeB(V)9lGW_5< z93WJi!b&@#V$ekPA|%D7eES*rEBprA?8)@kWGQoSHX_?MhG@5Tr8IVCH+&P^15fL< zg7uW+(5{y~b)CT(r5yy}b~xhv#XNhyr!ARm`U(9o=CG2LZqMVmr-uqKJ-`Im6}xQ zRIBq?JTj^Sd8`!45-Yll+>qjSIRqnr&{xYnJm4=PPDIsH5r+dxY3VtZmhr|$|sPv z^0~NXV+1bJ)nPda+~0JdnOSaJ!{V&eDEEmyzAo$sUbAE9M6ElnzuXI2Muf2W2e{Lh z^V)KDsxh;9nW24q2f^)tuV6RM5FYct%h_@njs2m8XAiq!|3VpF z9k&Srd*`tG*{&pb$&?RfbfdKa9cc6#YdBr4%f2<*Gu8DVS|H!dIy=dsU%o#xU1|)S zALydrb2YfbYwPLWJdYoK12*v*?C2c_jMv)$J0qBlk6Qy;|i+56ev1y{HygYT2q%bESi zc+}#v?sJB^+z+osK9+-MubP}JryJsr9_PgSk4?qBfv#ZMG)wj1u?y*XM!`(aQS7&d zCZ$GvhW-yZ6XJ0a=qm?6?*~25-0~w#IKlHN8gY^_pJl!a?!s~#9B@)g32bf*VgFXV zmvp!?_*@jGn39sUv0jrIvw$rigYqqPcuX2&Y35x6K^TbES-Xu z&5O`FO$)u;&PfholKAEJJ@_*+4X);dQMs)-`*rZ0_-9TKJb2>ygYJXr-%`1`ns%doIKEf#ccH+2$BocAq^tI0z@z zsR>Vy=E9rVm2B&xquk5jq&T@VlBFH%1NSpmV@kzcwmS0{-1U<{-XuY=*IXQX;6GBm zIs{1v{ovVozAG8L7B&YvGmH9hPy}}zp%lZS|iZAL5-|GGilC? 
zqhR3RP7`-nmku`zV_&}SgM?ZKzK1FkG;98-j9+#VJ6{XqE|U`wJ9Y$TG+0Qlv!XHg zX&|-{4=`BJm2!v5DL*qBx^v&7zljUF@{D~_&A+=h76@IrAgFo=TscfdYq-)F+=Ych0^20+!JH0$(kKwsDg4u z^LMkbwtWNqj{hN@HZxTDS)63XrS{J6!-unJ9W`jmsVes47O&0b|A4HTJ7O^J)lMD} zLGve!hZo9W=y3ZKJfE!1J+@Yme|G}bV=xGugEz*i4;}oEXX?A25|3}Sg~P*cvRM&= z@PT;l_C5DYjZ#62zdmDWTk*6s_UeB(58;R8w-Oxh(#T^<~Ky2Z2>Dms| z_h@%?_Zm(1`juFi9fyzZ^<{s)E@5Yn2V(T=lhEL#3(v=Prj}Vv&>YUcB|jDD)p&}1 zxXJz69R#IcS|a|P(G7>hy=2G#O^}9W`@zwnazRtW2KP^_DXWcrDJ8yf!P+%O^yh3S zSQ~c`om%fGM((#0KURmp6-t1130EQ4!V;ZJKf$DuB*+pyQ5^V$?R}CaZdz3jHH+0; zUHmRFjd{jom~BBF94%-_)H4@@>wSS`@cr-i1u)WUAVh3EjJX@ZrISWaAuqsG z>>VR5S|CtxAn-53vXIb*=J@DVd_Rudj3?834WpsT;`oH@zbEYrsySw75TK`HzE z_zZ*=+k#7$J(lyG-u6Z^;c01sXyv4#df6ux*KOU48)d)Pi7i8LZyNtSby&^p)>N}J z&MMSQ^kZEr@4+Cuc*TAHLs<557*q1x)cpJPFldSv?peuQfp#^}w|WdL`pTW#jgv{e zw;gAJcgFfYMUWHFkxZj4u)KeXI6ZI&=M0Fnu^#Bpu^D)5LI=3(FHl-g53I^~B+grG z$zs0^CMSMpy;9o?O(vB>me)0xoBae~@%39!AE$&*aVmDhzBej@Wvts#6E-ecrVN~T z2opbPP4|pYr4_9nblI(8yz}5X&|N8Zh$TA z6l|B;5;WWrNmHixhjx5w;U`xYavw#rD?BGLZ;Y?B%(6uCYa>vK@(-}G8a6r79$PF- zsr*9}xKRfh*_6!g3=_a-svztbafUifJp#|(??8)TJ1E0~Z^%qE#h@$~{CzPH#?7*3 z&a5XkXAH%}tT*gZpBHr4p6_z=XR@2G1Su~rIXH**Q! zyv}59^5x4 zLiGJIHt+gvH2gD)?mB71hH2gq^P&&VyVD1?%13e6X&f`M-7KA{ab&;v&en6wjd=Th zx@w}?Br4@~@xHFdDY)%m)kAwXI&Ss}zECl>Nqs^KueGC=yPQFkc#m!ExR;#_xDU&h z3#|7JSI%>(h33pr;`}axa@q4Nb}=^@ofnQp=i$-pw(k(CG%^R5xjjhJ>WZ}fFn5@b zo(Ui8$6%`Ab(Q7u-6%P%M8DX*FqUWNTI1tUyX!$(yvdOQa%R%f`(e~(V<6kvmcKW3 znGhJ2#)ABMF~caHt6LT!6?eGEQulmj>$0?I&a|&=$tzc!Z2teu-xgjE3b=XNT+rw} zfbSt(W+^={u-R{FnPHv|!M{V&__!g`+7;)(_-hNa=Xtl`FSTe|m$gzsM-O^v_)%QI zbDMI$_gJF=gw4PfoZ`&96@wM8hSiIIIA1Z~cpP;wv7wcQ>dbQGH&|G{hj|`&3JX6) zP}^!fs_^6e`}$nwJ7E`?bB^xzB^gZZl|LA#3z+W^C2rW&AdUBV%3|tMsQX$ai zk5(<9j!PBN6qB7S;E*{rZ=QzTuh?;?lmP_iZ|B?r1$#EolZ}q#K8FpvuxVTkwb`vM zL?v|QyFurn>zpH?+$$)PI&;sOX(GKjCs!^y?IjM%ABzXFy~UeXM1WAnaFF%t!Fv=U1rb1gAP-<-zrvAIT>qQ z*RyXkhT^SZ>dNh_!&n=&&aR#()r5U~AN$z*rEK(BFBmk~iS4|ziG8p+tkUKEyS=B8 zUVU|EI(6GwlU4!?x;ui!S?*^G`Zw^L!*(zjKY=dJDW;IAyTz(@-?`6?^FkM9s~S(l zv01LhFk7!gsvjF9)m$7;J0C~WBhJ1LgwLhAtt-X3v8~`Y?jT#&8iI>I?v&ccUSw~_ z+QONlOm=TsD)TVqK8oluEb^tVbm}@k+on7R!x`GN*SPIj&DyCL^F0W+4jGO|LmWVD zk0Z99bDA}en}U7QlIe5H1@@|>Kg4TAu!dMA`?!zMKAUXzqURIomYXfuH9GJ)&NZ=o zz;1d|l!Pzc+EdZ;q_RWx)7ej_qf}zvOLCsNi#m;Q7hTu=fe3>Li0$M>_iEIH;9%}I zTdyY_*e@4$*0pDA4yLfApiON0UqRXZY_`-qI|pp14~3~a-PzHdZ8^_cB%`RTrj}qtO&dwnaYMbSfKgoY2@SflxfA=;5#RQn)cpf%gd&* zGqxd+k~o=-<8GJVmqxOIb|>KjQxo1_S5pS{F{Y?}(ae2uI{rNW3wAb!)B3k*RJ`IE z-gM4}&b*)IFE56pi@9gm>ydQ#vI0ZGKfv^h(QM3*Ei9t5Kc*cBL&NtfX*utAzMa&Q z^1>F0=KdGhmeR#2R>ny2s%g@QqovZL#|6-KYdG7&T_q2VI2(QOSJs-png))Ggc~L! 
z(ZClYh7MyIL;v!zX6xb19TE0l)M7QYFKNkbXEy82O4R$LgO`gE*oo89+-dP)bO~)G z>BH%eB`tUcWCFp2tK$K3K(=tM{vhi2Eemf;Jre0jEWp2>8{zXDVXt5-&w7q-;;zM5^ZibPpy|dvuzlvpcInEI zvBhA_$`yP+f+yHYtqqfGR*=TggRsMD9@3&N?(_|9=J%zR|B%!}9c}%j(lVR^y2kPS zReyQ5(~4@({DFwIt?Z{u5>9+Kk=^nMf;UwM#MQaBFr)pw=!)Qj9PQ9bmo2|S=f1H8@iqF0M9|TckJ=^^=|H+QtMO-4dZ`9{ zRijvMRXlBZ_?7aNu8{Ti(RgRvWwJ6XCc~;4)ZuE_ZqN0sF4hA^xwUay8lrH!&|z@M zN6M~BG>5`!_SYU>m_Ti2{haQGSj^F`;OC!I!oO1nv0+Y=u|QgyNxO;p>q)EWcU&8H zw5yoD9@Qbw+{vg?Z;3DB71`K|Y+UH{5T;w7V{RG0;knfpTC?;K_ip7q?v#28W-TwG zfoJw|xrJVYn-{?2pKI_TeZbElA?RM4#BL_9V~thw*(D`)N}IWsZCZPrHf$^b1LG93 z`cVvK&(FcB&rP&{{6O}s>p6F=Mh5wgZ@2>lzufR9a;|JfGpUi#;4p-Vcf1sxR%G;6 zzZ1klM_uk(Q(L8lN#tRq2p#M0K;E{OsC{QK%0A460uMjisNip``D`g)y*G@ipAW>x z8>-3BHJa{+UIXWxQu3&8;lEzvsKqqfHfdulxGkQ>{+i}{pS(5x9xMINbbG@w_5#6#=( zJ72v7A^I)1>Bw=&6>o$OhhB@6gO_02>M3m7z*O$$!bucx=^q4nD6v|P%|a(Z1|`k- zPW2(v*^T~ng3m6SZJZ@AC#LVjr*aY~;h@R9WK{VemlxTLUe|m%+!RTBHw2tnebw_O6LQ zJ^lT3IrtfUlUohd5(TWoK$Ut!B{*WfTnNx2YTnpat7|WhK|+2at>G(( zRFQ2vu0)HCjBsRo1k7%%hIh-lIJdrG0;9{CW}l0OuGHob+Mjz zDH6e&piWM6&PJS(ri=ccN3&m^+58ioK6)#3Q}4b~45i~{kbGJh_-$3;v}{Mi`&oO* z@o*+=)KlaioqY*A-z=m>OUt1{=+p`AlEc9H4y4n`!`8|qO7FhQZ&dCh`vnycr?i-v z7OJzkE7SP}x${Zs&_m(AU&S6R?dF#`h+xUi7i3}@K>3%4f_jx9h7L)iuImr^M5RW` z4ZFZz+W(^acj{^UqKT}@AdwqW;LCi?&SSwI%oS*gqVwPHjX z+akew!Cx+RQWNu6?c@4wqgeCk?RaQ|4E_3Dh5Obu(ZXdP>HNF|R9II`Ll@Rjz-o84 zd7%yz*!6J955`j1fJ=1cP97Hdj6t=Nk*J(~2!49)q0cYA^FeQxv9pU1O{1r=NnU2y zV7P+){k?{+o5zvWgJ!TYJrBp$eBp9a8$}CVdBJ!wL8sGO*t}l>w{Q_S)K?YqjeM|S zoiaYZ8i+THu7UC6JKWILAh^;O!(MNdq$P#we0!TQ4$mWQ+?l=XOhPn%+;x?^t)wZO zbtl570nz-|xi%P`Bxq~3L#QOsmJTZnM!V4w?CX3pyxsqU`;;Lt?+$0OU;BO7+y_&* z5J!PiJ^l$d#6yNv8brXNTTZlbM;{&f8NgN91fb*50Ze9{4O_;Y17nY?wA0EHV@JM# z(qc*)cvt7rh7UgM8O8v`7x;(vIzi zaP=Tex~Ic(=SQ+slSVwZb~;PAHj{l7{KG}3#$il;s&HLC!->=VT*vUv+CdfTXo%z> zd|DeTo;^$#B1CZz;vWMS{;9!%g_mf`+0_g>EksY8g30cgHPf8i0ox)zlF4ufaF2Wg z^FE#j*;XI8?(v&`RF0(WcY0}N*aGxA^_`xWDbmczr)hTjG-hn@nmc8A71nuc+qMi9 z^o*0gx!#>)=|}rLeq};9+qwdoYuPNS{Z|VEQqE%T&l?bXONvE)PlK-TEO6vRY|D&5 zsD0}}{}nxeS|3&RzNr{<*6-lP$l9^=a5HeZLFZ?%V_8SpH?Qf-uWyV$pFQ zoS43cU6${l_KZo~;N6p0*`z+s;hR3rn{|epo8wMfVm45w#1b60UjZg)ZHG0nrhNFn zG|n=`nOWaCPakv?Sa9eY+IZk11^g#N1_h3A>4(BN0xO4Z&IE+qs&E znYb=Xm9@p1ayz^vFxWz$YI2^?(ilbb5Jj`+esvJ)D!e0(dr*;rKG#+@6i+97rop~R zFw$?Xct+DGaL_(TiT!nSYKt^It6arrR3C?Lf1?HOwi2_utVDt086bZsin^m$)5uUE z$Kw$SJ3l?9%vtIDmTpUkHCfKwX1<{AU{|KnQV534+8DcM7bIjagwe{^xNrVBbnui6 z3>q_l$-dBK8%$#HOqT(zo1{#v>@qZ*5$?2Jh40$Z%N362x>me=`gkHx+|TswqAt z5o%k4V1kq_t=nEl+n3(v+H)VlmL_{vA31^ld@&38yO(>$ zk4DHH0gKmvfNf6>3cSB#Bx&8ke@Ko4bK&{4&eNMI-ra?rLS|!k(QM}9S3^>!!eKkBGArrsLrU(fB81 zC&g7tFr$_cOy7P7zSY_YwK65#b1S0Hw=TlnSKSW;X-=8DG5@nHv?jyVLc!i9Y0*N5V*r#6FgaUz!Q9b&sI zuK~CZk~r_fCjPy?H@ti+Lp@dzSfg-;;^!Qo9-E^=4rC?^^GT(}&7<%@gFBuhJFs}M z9X=}$V&B)U6+FbYU}2U9WwYW~bkK1+JZ&Vqz05)2xD?RcjVE!1eZ08a2pdul^7&0s zH2=FIbFL0!(pw#AP~rsk&|nNp7&eCYukgj3^8+#1eiIF>-Ng4UT7(h%;$ZT)0Cs$m z3f|Le<2Q7;8Mg zNcdj{2)tE)h<`GeeVSd3G9M>Xbkbt-JGLI?Hs6H2&rfK5S|O;d=@;Lz+sJPCjzQz@ z-@Kx52&=siLXU-9oS~i_{yiiMwXb|xZ{2V@uAEC#i>sk!<5iK$x+gGs*;aJ)iUZeM zN}yV&&Z2g_q71Kn@4NU9B&?HVpH(IZ@5ROV_Ommr@GGVf!ZnX8oGPN1v9QQ2l^Ose=xgnxOln=S<<@V>)s`;1#mLxKhY;yy=mJ z8*dfa&)enn@x*U#?gMv}N-z->^tscLFPUgIRf;JXo?`m<6VY|iUQp{jjJvdB;K-XI z`VusdRUKDnM(1a+U&5R=aflk8Nka@44MeA(V)p3fBE~M9=PIr*C+{z{#5|p;sJ|4n z!o%4vlbLmoW6ki}GoCMMk%k5D(_qEI9KQPM2fCOqFvjYI9u(^ZLNClgPSRf))s`=( zfp1IU$OOh+8@ONC1;oLT*|Dd}Nb{di)Z9O{j%G?U zZR{UNnmGbS8;)Xk-##a`c^L%0f`{;U6tBMV6m9wNi_&XNnZkx>=2I~QB|lCF(fJtq zZ`xu=>srlnt;Vvq#}=Tn#t|B`ZV{7SY>CeQX<|5wge1)q%*kq@z_|ZG>;F~3>_>5A zJ#!mGI6vktEej*9bU9czNSzhT?c&C2G*Ik?wXm{Tg0;N!hZAr9!h`^KntS^;O}ZM! 
zZ@#6)UUeLT0qM$Q>~vCG<$9X8oaG9O=5L_u#^Jn2=`PecD$n#(dbn93?u;4!f$%6R zoVPv@UhWdHg2&%^JzEFvkmCuExVVG+@7-B`{ZmcYQ@M>5P4cI*wAI}6-x*+UvWLQd ze1usN!d!fyKxF+@4D$8v^ey`wOcK~O1$}X}vGFZ8dE_C~9zGl|1`XzCs-=jJN)$sy z!x)fd9dKdYcmBwVRsmNNOp^}^&z?u~AlknP{`%T+^KV@gEgO)Bnw7KZ%@H}gWwe!T zuwFr-Gj`*i@A2HsfMv8$LFl14b)P!zGlU(00||{o@aO$GXj(sj4NqN1r8_3lQhQTY z-l~Ec_0RbX=kcuKy&0O0^u}W`GX?M1Ng6);2W$uo;)<32!Mu_d;l6(>cp6GL$Fg{? z&(scIhxUQZ>WQr0d?2b62z|ju66|^QF*+T26Kv(8MUvCo;rj@A@uuD3?BJdme8=nz zeqMqcE~{RGUo|2{mY1UN8YRQ}HVwR$izxo7$|^scK*`b87`aY`?Su&S!Fm%{s{y#% zF_xcpB#5#S8~GWfs*ElR{vgkxXePN36Wd+kU4#tw3>yc1NgmW85zU5p8q-3LouusU z%U&$VrV=6RTcR_b{c~&Nnw${=-LLbvZZAfE^8|D-Nn#u04cU~ne!vMizwh6UfXvkk zl;vuR71y5A5wF=yjR}0KJahEFIE_hPmIvQLL)No+1MdAD$dCHC8_TOEv71j-&_l>m z_WwCfo9@~QI#er;iYa9GgRg*Ixhm^4b6^t50t-iAV@@lYMcGPiR4#V`Eq;}VO=i4; zkEsSMS7J4*S-xE8BjxEse>&;CH0EZstrU8e*OBh;1*o3pPQ!O;^1pt>k<^xP@U+d8 zZL08MGk1?-4T~gkjcO{ac05nl-c|5Do<*c8FiB?Eza@FQ^$dD!u(dIYT?|&`RzW?) z#U`^~k+KZQ0!Ju%@`iE)Ft{+EMq26NZA)tmZBWMMFPWhF<~F^r)@ITR_3)8TJ?|Vb zkC`RTXB!>kq2BT^4cwxK{++FmAfAYmvu@H=TQ$6*wiHGRv!l`#P2BQvBmcE|Bpc_} z3HCCYtW2+%YJ0X&rs*ZPlyn%LT#*2=Zz2B0Rjj~C9{!A)i8VL>QoyH`T;@#^*0S4^ z6jqkVq?vlfq>hB=7c`j=89)Ot7f~l}_F10DkvJEi{Xx}G8)|g|9-qq?5 zwYrTWG9}q|xoLE=ehI%;UFa<@9$0svV;Yp7;BZxN1zw4`#u>L?L-T|x_UF}ZvAKU0 z-2NqnHFZKCkf0Y=c{<|E?YqS1dnA!LxEM&S8?Y^wTX6^5XGw#iz&h78HaO0sE za7vvucxKYt@@m=;T+B=FkEB6Ygv|GoZWwJ=O^K!+thz1%{_c=u)5rDj>8;nGEi?gg z3ImuRYT&WI7N~BpiGtombqO=}LKlWp_yc3+1rdom8PQex0TDG8iDf1!<* z`|y6lIqF<5^yy6Kf$n#=z_D}=t$K8v9yvO*dDG`%f9VhY=G1$*AVCioN*2KAE48q3 z+XS2@7s!e(R^jKp55Xto0rF>W(275*RQ1!6?P_kv%H5@$|6^~=N%3cT_iCVV#AS$< z&*!8FJ(B;_YHOiuSER0Ln8jTx_CxwwGR)9P_GrlWjS6uLF@ENJ6+h`R62rv7Rb z_cc`u=SIe5EeU5!lvd)-Zv!Z~@c>qu$I~YdYyQlQ4#r|IVAi{N-5kLKxyfxmq&)y~qwUxmV8(=b9U62BcXp@*I-Oo!Cj-R;*Qf5l1cdpHboFPWiM z(ph#**Ov-xj9LD!SFm2_3_tI=nn}qG!vBW33fkNyntSayr_RQR#+?&GQ`K;|o`0J@ z?66{2=WOPZB}cGN{ycZPb}Ear(F3=^MdWQS?A;a~hFw!Lu=l^~)T1KwKJPljb|sEw zy~-E)eLCSdPLr`?T@%1C_Z@FDKpq|qJcsMUzjL8Di_JJD!6uD2$BD{IKwepd!?%rv znXWf^>1Q{&Y5Fc0H~$pGH%Ot0?t1pYb|TUnTa?=uO5N5axMurE+*<95ziW+f;}{S2 zR{b>}aQ7k%x+-Fwg@NMa7o*wU+%z^u*c)+Uu87@xgoWRS3b?j*Ew^=d3m13qGUPrG z;p*5T-q}Etrb^o5!0u)Uz3)qkgDlyek}_^V#2l=w=h)USF}OtHAE@OFWn=F?;JdEI zV$`ea;MC-cq5(E|<)$oq^kgYc{bGS)hk0!0&UyIP#e|A87__*{;tZk7I?vnyYCmsa zN=8v6Ic5ZYjtya#){G=yo59#~R{=koXQF%l5V#`Wj1NlekUbKyPln$`RhC|&sJ1KI zS+`TXM$tNu#x__~JO|Gh6p+lAIA#`l5odHtv%9p8d=v(Nl)gRf&emsJ@2Ifx8B)yt z&RiU29Zpl6WHBsY9=ute08`cr{uu)&dN<)c;Ht|MaAz49+TWwnoIbwini4ZeXo0bH z<7sY<9C{1q(VR(ucekG)ndCPVBGHP^LXX3suer>8+66L-H((_L9GGT6J9zDT4*z00 z!F5L|C!opL#Pyg=pQ}k@%2Y$A!EPM@dW?O3P z@KIVk{qQOTbBVDqWn?A1OeiIT$rW%=>m|&Z#`qDM>$%mZ9C_nBMJ#vH#KYTNSyuf} z+_OfRf+wh;jny(}(3r^|* zSTF1cp5h{Q^Z79P^v$2+7Yg&g_yzx~Z8$yLB``V$4}bjU)JsyUPpUfI}oWe2~N^(JRut(@? 
zuWZy6{A1M&H}v4Vm60STJpT@7TGdW%l!CYM;bb1L3+tOAnd6i``ZRY2yVV{I=X2EY z%Ds9@h}eVgJd0sToIP8ui#6G8^YE*+yRT(Yrt3Sv*_1>k9==l75B95Dx`KhK)!Vh40v%DN(OYo`0MuI zp*nz!cKKqt!WxqBJ1f2y(hsJBr|_@n7&BbvPjZ9r!=#IOoTU9Hmiq7@y!bo{++=Qp zqp&~E?daq>4SAd{>{|AoCVX%}3MEQ}UZhP9EIu=n`oa>>K0uWve%ruA8~w3$=qi!V zoP$7YlW`37r0T!hC~ZnW%f5=*e#&cE!K#v(5}BLCWh#THH`cB5Bp-m;%g zhjc?uyB$aWZqSY30jxJTnzZHz)6=^Rq%bNMUzxq9=u3xS>Va8IIiU<{SDy!oL}|Ma zwLIQ(ixfNcrh*)Pc!8#nFP3$6^qTLt@Q-lm~wIo<(gd>l$U zj84&pmw%}}W+dBEDa;X#nIK`X2d67K2>RJz9NV>!B%OWP&MQ2b53i%vWItAV{~H|2 zcY$iDk!-g85eOGF7V`~%Ih}I{@T+<~6g;_2j%WQKsYIH6aL$0<1-ymjPZ&`U@6E za`E!sRCx7m0W?>|fPQJK(94%j&m$ke6vJ@(xnd4AR{r6t3KychqBGrRE$~6zhW-A0 zLCifIg$dhS;6TlL(2Ki3ZWoS#qw5vAQjtc#lz|?#){8uf^T6(qHoZR(P8Sb*(r($~ z)M9m!4_W&g=0A>xwsv>ch{4EqG+TMVM`f+IeQ3k8(zR=e8q5KV96Rzt| z1BC4qda0ZpabQg&6kA90za*BieXxYflq#pX`1b$z2~a8-Fx}2yyyvZ9d{(D6`dG55v*8tOA>F__Er&I3*6&f=AKfL_+x~Qh*o3L+i zp!ZGZ+3I*LPU_|}=o5CB6NH@ZBExmu=QGYMUhuI^9DIbk+~W*WdUSB4<_@^j*hA7= z#oY6I%PHE-n?31pW_#pc!T_fypg;95Ef{@X_nmwiI@lYRY3?MO`3LF#D@Qi<&j%szU(T9+cEhvtX`GX>k688oP*Kx#6;YVcG4^%; z2GaZe2YxpmWnQm`;&+vN@{Tp8z!6*7!ZbUf1E!PQYfAa%j~1l8^8nX1!CaKL!3Rn_ z#Z*#0k>$m-P=>N4Y_VU@Y)?va*M~L1e9a0R=h#k9rY_)rPkg|apB%~lE>DCrTmbp4 zjwh*_wYbj2gU;&Q;Z^2pq5RQsaIaT~{#{4K(>nIhfn(Fq=AY1s>Xb=&jgriI>>#XY zlSk>CEDAY#pFe8ojS9oVN$K56HZ0%Kwma@WEPc3?Ipz^qopND(Z5%v&phC(a%P91V zfv_8JA#aCsU@rX_HSFd?cEC2AnSOvYzT5L^>H2GAuO8k4@M! zg&p7X0lq8lf>X(IxCM$?(n>AdmUr1^n`<=tY&VG&WFExu`Y=*dFkv-6tKmv|0c_eb znlx_YLAYopNOAt887YaiZ`;|=ql)bMr8d$uwqUiZV_=bxW!R{BoTa%;K)KepuzZ>r z{Bs)NsrEfy0OGQ!-E;Zqv@ejgp$>;l2*c^!+MqX4n6)KLASUKHXjJyX0f9l55D>@J zXLvI&wOCr!-9;ZiU1dLVM7VqRB(&S!$u7q*ijpe9l|53V)wUS3@A35c&~(<-l)_Sn zjc1%QK=d&Y%uSz;JMZkr{)T$aPjVlpn_I&V*;LD4aM5KufB&L)``+*-XZ^Ty`fk(_ zE$oRm&jRoLA?%Z6Fz|x~cB%gXj81>ePyRd*?_3M!)03XDvfDcB^SBSV!=e2N6ZN`E0E#cEu$r5{== zGq|{a!{GahfzYhe1o7-WZ)x;^d)w_vHB&Oc-S~eLorgbH{};zagp7-Bsl zf|~vvv1`N;;h2=7-r_r!Elh@p*KEsS{s}EI{82{!fBz#t>tH50PPQGvR5uPrt zq))ku;;!L2WMmM-xqU)evHb*uNtsFq^-pkU_!D|EqbvWO-G?5FpD54pg)nB58CAFi z^X>R}e${pyZn+$mKhIuB87tb!3C4;;4wzwwcVnb}YX&?sPvTQ{pCO{-L^51yn+us9GZ=js%mkd_61Ij*(%JRbqm&Ck-Sju^U!bR zJ31A57%r%of~o3I$SlZ#rzh^woI-PUwlSj=liA!i!;M#ml~HQ8ZsptJ?R5Y9RLVOU z%W3!Q_|~3R^n8#RschJS#U3YwV)J6iT)B-pPHKWSg%6^v_Xi&B+(tFacF}du-Qt^= zOq{-AB3+0J6IAbfhB=?!3m*T?ln=;prAhIB;Yn){wH~-fg*JtdzQzFmUXk;!zFv4D zUYWE$e}T2n|B_8(3U=!Ak=!3$6e<%I36`P__+`3qm-a;2AJe(~py9eaaY;EPE*{7o z40gavx1VCn?&*A_bsqIO;G26!;yff# zkJ+oaZj=@(cWjda9c6c&G%`uuxOaTcmlR%C}T zyvHYMzBHYEU%w^l*$I~#1WG-djc~TlIGM(LMIPA6jWgaKrJW{D^d{?znDU?G#j1!G zq6}V&{f_oUj46T9twU(k;U3hl>Uu@L_-CZvUz&Z-h>-#VQQ&eg4_&L!k-bO?)!kNS{Ac7XfLFer@-fXAE94ATROHWN^FV{u-kTj`g~21o~?UN>)m}( zNeIE|JD0J`S8X``?kedRy#wFJm7?bFQu_Al2^=$<1O@a~SgU*-|NFR;I;L;oPp^j2 z{&TxAE_^Dgm}atVjUx8G>x}PoY;eNSLa48h!T7mj%gtXV)69=Hth(?a3|+H|b@u1L zo<-3dn9?IdD(Vtrm04p`Bxo3 zQ>}PPdqTzLgTbJ_-xapLaK%!MIaGlA@P(-l_c0%dXUv8{c<+l){pB#so8v7%5^lx+ zCY_;^7Z-x@T*l&T?l#TC~AbAhbv1uU~$r3xD(j`yWbAwmu7?MHNK@< zb1Uqpc%7DPEkMsT-Pxr=9hVjuNOPnjDmR)4SI^ty4$FE*JSaISZ7AefFjYwJpZbH} zu)o!ZF4`@_tCugy>=tV9yWk)`u|1ueyqe^WMJI5DbBMU=cLSw{U4#cuR#Rr6EkB!8 z2f?#PV5I(E@x_Y>dipF9dWH6-qUi5pgVl06QjiH7GVH}^>&Byzbu#^GD53pk=BN=A zDXcVAh5F%3g|L6$WvYcfc*u1-H}p2-zO`DEc+oXCdUky*r{W`4{NSvW@cH`{(eQl<y3ZA!)4*|a$500aACAwPzzN0HG$pf)(=0j*K^hKxsLMpGYEtDgT?IC2 zABd;EXkkFfF|ai_Da^qnwrGyW))Pg%)!&-V{Jad`6VCH2ec~*FRc)B>ip)=1_7|n}@)WF=> zEi`ysPd3$x<1*)7eEpX?bY8Mu;u8*`83_wny+L}Sz17jatuOsMkr$B?J24?&% zp_x8w=wQP|zCIjz&MO(21`kE$mM7q}+(X!r*ozOITfqN1x4X2Lb)X)*uSva&=cIi@ z6`m(d61SF^W9<@E9O5nQ;n$I(+%WHU#eWALcF<$gT6)wy8J&>$*I?@2qe zMDj{f#`V1(iVX(R?)wGP&$-27zn{unb|Og_IC>gt^ve=HS6!pHLowvK>K=^kuYwcK 
z?%>_qcW`lyp5!$c&!Nugutu{Fzl}G-=~Yv?wKYy+f9t`Oty0#Z!Ju-i#%w$o@(m`< z>x4V=_E5JLFMfAw0?V)8VPEG4+*BAL%>KNb&3?ziH}4n@R(ZwU_to=r(`(YW5>6P7&Vw2v-g1I@LDLiT)___)m@W^dGoaA0oZUD zF)=L|Pib}*_kTT#y;NJMr_jhFnhRk3u>s`dy$D-<&+xfj0lXu01F9E2kv#L)D1Jo; zNL`hH;^`TD_}*Hqf-H^>*o^+U0E_x7iK@G7;K0$taBz?^_DsoyxiddYjtf&(8Z6~9 zMmNG!BOf|lvJk@661b%?n$NGz6^0f}r?3O_p--Zg@crXwaFQ~6ySfD8Q_m<6ZKMo* zo4dFbol}! zw+a-e@Ek@*>hMNsR%CzTr;In{NjWktd=^3OfCfqmoVCb?OBP?igyD~c$1je+)6w_nj^wrKV7{Gv>-5PB^jKEi zU2r(nLw0iG3AEeOg&Y6uhR@a(eD&u-9?~y>eO7H2-<+)ys{9HuE=!+uht|;O+9n#M z&#?X3K+OHvAjaNU2^qDi5V5s4li}mo zk6Oa&k{jK3R-!cP8HXDNAE&I{8NxyDKG>%)5tZ7zI$LBm{ z8x(IqMaC{V;yMuZcg&zRlLcgz6b(}}n}oW@rJ~hIZT33a16NHQ&tq~Gc)`J0yzYWM zeh)WfFWd${)g|XpS9hB9?K*`7%;XcZchP=JD?YwCPwd|tvHIw7(L6}%VFhg!KPgTT zT6COg?T-eqi4TL2^(QICtqgu9bi*@M+r)^WmfW|o4Qg*2;5NId)a7Cv6g4_KOG2}nXiC;vOfp)-KAw)y#|E?bx;+jM6 zR$F4?Kj3o>Dg52wnpkcZ1(#g6Qwx8g%ptvTe!+SKvs1#opED{1{w150aRFxhV0gVm z>O`OIh!@m;2>TP1F>vZ^@%+0i_#!WbnR60^;BDWW7pEDaYt0huXKG5KdzzSWth$OBH(MuyswT7I@(oF^0 zz8b;K10lTYU6$ye{|`Ki&(qDzUBD|TiC$Dha^K*=*t%K=XMNv67sO-WzOfG4XKbfF z+5>oe&-XOpLx27}$dRu(m5bY4N76&-@38HWL$6p7GtDGOLDy$LeXQM22bEP=(bEim88%V#(5~<= zRNYm5l^K10Ghdjn{G8CeGMf}K&WIiF-{nucTkuL#J^d#e$ypvQ5c=B!9S1Bx_hSd) z>4>eO)<#vzGXDt1{iWXDh!N0j+(J&u0r`nH`-GRTzEa+yXv#%Z%-)gA_NrZ2>)Ign zGj%}M5O1MCz7agH*x^6hEg&iS$h1-7+K`uRtZA z>p}kSUXd7vf!{~sm_9kY$ifXSEc{S$>X#L+3|k@g{jvyF zj?fg3Y7LQc6@$3KOA}gpy`&p&GsRgRDVT1c%kwsm;aTTb;qHx7B`5AMavJFY&E_+C zW&K)Qbml+qI%+g2YU{x3t%c%Uqv3F*G@WHbQ<%!MP|aHfb9;7Rm9$K1Fy06L^{V*i z%`q|b+jH93b*C5#on1RUUJ5!rb08u34sDb8qZzT%{`={3+B!ZTUJla}hH$gw`Ws7G zCvwp5+68gs!vL`VR|p^MX5yQLF|=sU5zM%$i~dzR>Brqj_CIh2;>4rD_Te|)Ct<$(9O|~b2cCIVEj%(11@|o$Jnico&^Fc=XD>JiDHqFml=Ob7d>)52 z(arR8NjUnvl+Nfp2`_oO@EmE*bxqqI!i`PoW-lFlV-pPu%OXI%B7-`wAIFNZN8snC z?+{RIBJQ42361GM>sRjMVcyX=`QSTohO2bvyc)s%J8xod*(~lk;JJAGgFTv;JY=P) zXgZd^31%!Sg!2s&&*MfG9hA;vv${@}@*@-Zv4JPJ>^MR1Lfz;=XJwqBFaYIMrwOVA z{HCfb=*#`-<|Q}WbUcSH4Ool;uOGnkw@K8oTfXf4Q&+Z3y#?#e7|^ZvF7WfpTF5z3 zOtZSAgSMrN9EyLF+Bg+#`0!V9spwMF*|{!lU9bGLMJt* zbIQX%a4)2aqrTSB6w6E0WNZj;Pr5*{`9Q2J*11iu=k7zc6>UFov(OF=fAsTVAl;#*CJai>;qqRIM5=M4N!ZvtI+9@0h)US;M251 zuB_8lUFy?hIG}CJE2!+K zOpo8s2QRBvV)l*yNTXSi-^_NQu}>U%*S;Pw{+=dnam~i~_a!9E1}Jzt8sm>178|`7 zhj}W)^1QX+f3*`hKFdb?A0zPU&2cbUb{yG$Qr z+#m{=mMvSlb2dD4H|G7Ah_Qa}#jeXDaItow;F*~uofEBLnA;ZWzwW2d(_aTIru>9g z!bLjN(BQH&t`jKSvZ2tuz47~>UNBVasbF@!6Hl9HLxGV&ux)`3ZXfLd^A~Az!|!U@ z6lF^uCA@`y3F`&BI~QQ2^8*V09znscwm_h1_Wzlpv}*Y*iOpp|X4*f>&3kl)^FJiE z=Id;ZR+>U5R1-x%lfB@*M~B1pCcwthVn{eVfy1>@u>N-s@+^2qH_jE)=wWkd#*mkk zADK>ZJ7>}~#kr7rM(TS`m$X=Ge0c8rukeot7iEd!j%lCdt0bTDrCs~D&LACQ z^+b8_vSxA$8ARE}sc6(;KRfr+fX45~=tG?&KMbpft+tAsFh2*(B4%;>EjQXG@k55K zu7_S(i2_{c&m-<`;?u*OX%(-K%HyY?U!t8@k#YwvRLjKl;<>_sEJvB>yNzrdtYA`L zJTzYLWn)<+b-pzS?zJ`vD<1dc*R7Ul1rz!Dnod|;=1B)5ZV2UuyefBB3eq$_Om3nCR$qc4Up2oJDH)HsFM~a^_R9yIHig@?@ z3}Hx)C12g;2X8{>OTM=^;;i5(I_cyHii<{r_4q@wu*5FxvrGfaHxFWudp6j8csl>B zDZm4k({RXIJt|FE3l5j_xZU1TRF*mu&Vh^BJatfozDgqa)OM!$1KY??7Qm@}&ywwx z$Gp$1i|ddL9V=&iAA}Ec{P=NYo#fk<-Zqa7ZrglmnzQZ)q2N_^1L=xsXbW3Px9=*j;3j_a_3Nvy?uukXC9O?8&_%ev>NW$a)r(eyiMD8y%kO$^CdD$;tASW zI3%N+>(~DxQCB@lbne|QUNZNfS=k4`@#A{b{?t)oKW^r~O_|)kU@lo?PQY%(5=T&S zvRt_nM-K-2uN2=@XmvN4|3zIQ3qE^OCa0! 
z|G@mLpD@#V7ETmAalySJ^6%S5f6iYM;;QO}nJv=&?^hTdQ#mZ^-c2O^$5!&|i;ly> ze;?_Xha==#b-`M%e`KI&K_0fRg$qOc;P0UMba&=;+MHTWIuA`S?d$^B;~YhwHTP1# zK6*s+zA=hrM#T`h>jq6fzKpZO;-QaHINMv>L1J4E zxY3~wvcl(au)L6!b6)acyBu7(FM>ltCgE!F5GlIt1bs(q4$Ye{)n9m`f_u*Sj`+I%dQ?F?=3QPX(d8k^6f>_@QD=+n61?IFI@ zu1;4aj>Js0rTjidpR8>j3b9Rlc}`}e%O2~a7(6b5yo%buv2QiJJJAG1BE#2AQ{i)U z6FuDDD5UgnfsxTB*uL#7TxspaKPSf0^81_l!}6srYW`l_GwX#IUl@o1<2K>F9y++- z#TubK{toQfU=D+HPxHKNM-E%{6wV%qSNEv(2Yq! zd?cVuzRx$B!@neo`736- z!+oh^Ztw5LdTSMV#9>$PRhcig4}Sm&D;A?dZ7#ej`VUxj9**_%2ovWzi=UWoIPgBwN{m68emDj8*f7f z=98!M2^i+SktcIsZ*JnRir^eBWML5f@?Wu2QPKgow)C&bbb zQNkZjZ-$*4DQ#T^-D``4^UItFk4jFOPEzh4+mHs{jUyR$`_I~DX7J7HM8A?*oP$CTBj)STN3=dV&@-A}#b zUs9}aWOf_X9yfv0>0e;L9&HNTS3hQwH$wy48|U(>Z!e;D`vgvA_OSD zBYjgP$h`23+G{hwWqJgE*9pMWN`GOXnJXGPCkW>n6WB;=20rYkz#%WAXmzcjkas7E z?z?%@o!RNIYltRc<_+29UFSvX)jsHZvOBD-Y2d7f1~_qOABt+40Uu{LptavIdVJ%F zOl7SztAsei*bz;1Y3C3g_$Qy@wEN=Bc&P(Zk_&I_ev@$~{>;wxtVo3EWI3hfDFZc>(>KEx;GcD4{AjM(S+*hJUZj zQFn+wEge1&SJy?u=M8GYgPmUG9jTJuBhLPH*mb?WUj|l`B5o zC_Mv4xq?pK1Y8+@20otMPrWl_!lLc2f_nD|2<;FA4mS^i`8Er5^|qtAGsjT+(Kx|= zUPoGVE(%Z3R0i&AbR|M=Nk++b|wwcoTA-J`}@yc7zP~ z%jA3HBJG)13Fdj7NcqcHcDNEE8h$sRh5cT`F8h2C%HPBDCCV87AxFBGY$M|W1I`{5 z1zTzc(!qz9Va=7h@{8>UVZGxJ?6o9Kex>7nVV=Qp=wB007xa7J?6ck3>Aw^5vOm3X z+_)6H9^VcpK1ajY_d#f)A#%T!TJ$^P3MtHVr^we5E4zfqxlM99`|g3p^&8}GrQQ|Z z|L8KpPlp?Z$>e4l);MyJ3@6?BLDd^=h2>+qfX%pYD*k68JY9VX4o(ljw;KY`F=eRi z?6^wtzggS)?Y4B0XJs^<4AJAF4GHqc6DO1W zl_F34>rQ=gBJlgzNup&$5xlOOOB;HRBc7+rC1?CZXQSotDf19kjv6ZKG$ZN z$zws3e2npnUqXO_)SD^!Efm;kle@hjd+Gm8=xn%H{&&#<=+=@64)dg0YTE>QK3fgv z-%McR&Ce*YX#(9CVZv7fx(N>sbf)9p1$4FJ9{&7WmwVV9hvh%Vf=|sTY?Rn;&z}$D zfcz?m6}G_RLleX=FNiz}ufa%JARDwD=i4s_3R|2PQfm^p-H3v-U>?$L$oR zY|I}fS@DP0J%pl-<-+J0zhIc6E#xYn!G;OPX@WeQ?ikKSDS^llKP2|Yf6vG(I+~Zw zd3--m!4EFZ&WEqFBGG@HD$MGC1{X}rz@c|+d97greDqDF zno%p+EIUx>y}cUfW)0BjApT=Kitp~MfB} z6V}6fYel|1_6}Z3)qto`0X%xzR7%;T#7j?gq`!|N;lKS+bU2~_F0PykCJhpwCF%q$ zDI5)}!xz$2b%9qcH^+BQ1F^_#su1)|%0zu~;ON=GIOs+Ndb>~Hi`UetN5OD<>hzfG zw|$e3i`vazKWZT2^CHaYA;P97fjHg7OXhsg9h&!DfdfHFQjV*Fw67lp<3Fxg*?lB! z;^CYs%{)_k5(lO1q2J%mK&P(P#l@2k(BwEnnEE4}%>r8B%cI43Y4uGi)*Qxx+C^YF z^ELcSw!%|ohhb|EFX*`9iFnUuHwjwLpv%;bGGk#01|8oqcU` zQU{)PrUCC7M2WuP`)TV(V|qAa6iuJ3hpOv=+)v5i$vPXHvPYGktNGxsEeVjl--AD& z9|@;4Uy41&S|QzW7nGX!m&a4V~Z!A<$3;!zEHqhrY4B3W$Sa)5H4$nfb?TN3Vc zX61Q)Jfn{}78@L+YkxYUrctHv*+mW;CT$@HuRC-(!y6AyhYFor0($zz(vP_wyxj61 zICgQvP#0ZM*i!&=XG{<>!+t^6f&a1IwZE`r`9W&=(kuqc`|zQaT9Ep{0qoXoCk-i^ zz4b^xd@>}9Mt(d)72(Tp>2hgjQgRx;4hsa+MenG4(QMq`HbJ};6AqsLsX_a@Ua0pm zh25^*p)Oi>_+OoKiV40|t>8@id9a~xHH{hE6(c64 zQ1X&OxbLhF0>ZQ~Bs()FiNdIjXK)s>I5hw|P5@6oen6s>zCb&g7Ha8mjx z>@LrTy1)&>l8_WyKlvU#X-k#g44Xk-YfE9B{vq5NFrFue=2O_tp3J2tJgea}y6de1 zkEG+m&qd+b-G3xKJ6pn!Umv1ABiG{6oRz|xC4QJQ^(7>`>+;4)i29qQZ%e@x=+R4; z7dHn&*I*+a?BvOdU1xLmi6tNl69k2_S!l8OW`${gMf5uy#M#$SfC25k__JP>BVK)^d#(>D?4&8Ys5KF%J-AQZ z_a#y1bSE^^Ho&{rE6~7U7_SKkWuuezG`=Vrrj?C^sxFoo5iinHi*k4|IiA0EyCSH$ zy(OjQo^boK1@${S7H^l|Cy%RT;=huy(D=7IRPS4Yo7B3HhtF9m)Ekd~DiWdc?l3Xh zL5F;Ndt>nSr?S?db=dFvU%IwnAqsPj3TMpD3Mx{D#BP`jf{&-r!5_oPt3>KNocu!$ zPVQuMB!?P1-50+vmbwq!?cmz(SkRt#2u?3@;BzD9@pn%@82v=btvY6r!W<`9V!V|+ z1}CvRTaPx?92GBijG~}p+u2}ki5O_nog?2~rY>vykW$yfl-Vtx>z?(H?hYcv8$}__i9j!95#QgioR|L$=dm%K?ENtjV`6tv z?}~azzM;ug&+B05?Kv2wDRq~f9MF4Yq8O%nQwV){g;pe7r^e2wNF~mOD%WhJQ#eAf z>|%vIW^aPZv@60yVK1(9sS)$eNAo4)Iaq1cANvRQm9iG8^3hT+`%Sk_oV)J?sa-X} z^RL`7q{|#05ZPVw)TZ&OdGEoquv+|LqDrF^)?xeM3{sW+XT@*K#5-D1GOddaIM?bE z#cMd@Nt0&y-PN3CTPBf*Fp**=gh26x9m2Oe({RMJe9<9K%A3ifaO_MUHtl$l!k{}= zw`rnTfj36(QKxwq7YgIU-^uL8Zzk1%I}o^cAi1n67L_g%1}Gmv7ky_e59!5U@fP)! 
zCGklQD+Z%vDpQVysC9lQw|+yT7e?ag=?(N>SqUt9vy9Jm4-%TZUW#)Eo{>1(negnI zC)BQ#vAAFg{@$&Qw>+nyk=AzD=x@P~mgUfmv1!80%r8Rg%y5L*;XL1^yObFS~)4>i@gFYGR+h&U+jV}l6TVc)d}SB^#`4pahonnJ^VP5xHo}c z1npiEDE&_ywraG))iKMs_?H2luFa?9T|Iecgc53cxN*$tU7~ya6_~Z_4s9A6M)}Kc z%dNzt*d-`b^gYpoJ8O-Ew+oE0%f{}aOS#16eBKMU71&|R^KRT8{tLFga^=QnG4N)h zGCM5XP5!E*5m)GOl#K&-9il|9RUG7>-X9fmrSsQ2H6OBFS}orDsLDw{9XeB6Jv1J~LXKzg=0HEo|nXZEia;*=7_g;Cz{cBnQw?Nx&w$|tF? zL=%rsHpYsh$6(q+>R|spS{Q%nC|!?O#*du3@UZDBTohPJebqJKrqtQ7*l0niQTDQb>+@jR zc{%(W&;^Hkb;Ruf5`+BcZiy}U8H)N(qwOs};PZ^#7=5giE>nr9elwA#g&ilym^#Y& zG6-FpHCV8)5LPRP^L%M9{d8b3m<;U*SyCTAV(l*Ke8+=Mx4F}=G19Dj+(J|Oem4z=b%Vud>kdNj869Zy5=n&O<=d*tLlSPZnW#94k_D9^1I)W5XmTL!6THjYtC_YXu zhH6ln6eT`A6(PTA(gv=he8}r&xa?0#A}99Q3pZ2*zJFX%y!l@o1P%5CLy7qkzu>ie z?mZPQA2kM3`%2m2f@@Il=@NMty@ZL2a zl?*E+wu;;~9Yd#X<>h|Q;MlKg)HMGg_^rE1(~`~kX5VjkR5b%$#pH5?g)2I}vq7gr zU*WdoqCMV}hB?Q#a)6CGr|rE=aT^S{a$YN3U62A@>)q($v?4mOVH;;mpMiPHOL?E0 zkKl8(C$_2Pk+9#L|CaO?EJwbDu$Dw@P}?naz~=CkT);tpE<&P(E?k-W2O=7#@*20k z=vZ4yhhvY!jPqT&V^1fZd^d`x%#hB{k}LZ4S_3@1FP*b)s#j`CJGD|ei2Wfu$9IXpNIQmA3RK8BvQVJS z@r3t|QE}2)E^LwUW#2V|eposjNHxYs&j$&MKbnC|%LMviU$~p*hcS;%mNy*FLSJlz zxU=ar`jRM{C1oUzTjUDK$!XlsU6DuMi^mt&OR;oD51e?+1EWvcfkW&wL3r-Tw{tq+ z-exyW$Ez-}n!72c?E-x|UV}1=E~uCz<#?($v+mal{haH_wWL`z@3e3>wY>jYh z&|LYWy$X0Yv6p=P*2{1s@F0X{C~#ogX`E6PhI5yDfsxeyc;elSb){~`*l1*^qu?J4k_!$Q%;X)pL zQH#Zj`CjbW%g#l?Zj@McB26@(BHeAC&xL0O!|6|=16eQc?lAq0-#*OkYR{&4WTdEEfNXsaO^}0Ih^>IOCEl?D`_HZZ?L> zu4Wykz|^xeu(AM_^t6ZJlT|o!RD^J;;Ua8&eqGeiSL4d9exPC82L1nt_x% zsgf(A#B%~FeOF=U3NtVazbBl3-334IHHPos8ifv@2lBb~%VD9dJ`Ra1rrkHvX+xeB z#OQcpy^S7Mp3xQBV-!%kuf$8sd`IF~EA|t3?W(`YKmx;!~v&1K_49Ux&N`88)KR-Qlh#m9vz`41OLe50+xsx$4 z2B%;bwQY2(a+lNr&SCX)9WeN&#En#3045)2(3*jsSTLp^)Jt>mztdK6kjr?CdRYkr zyXHcM?{K>HPU`txctKv;t9ikyj(Di}KCC>Vh+icJ;;zo>T#(v}-`%_oEy1T~ukROl zAU$Juy601w{R!xON1dk)Tq&yT45!QySH!e^^)!3;Fbo;{g`W1*gDvgNbg$~Nxa#E* z(c#BaYG_$Lmq zGy0HxfmY=v3&Y;)OHL`^^}dR@ZivKOytaYr~^pf8HM=D%ZF@|y)CHKJVYc5{eA#_%H zXIu826t|7sM@5N(Y;?aCxOh8>yi5n;hZv+PGQN|Bk#dZJYemjqA{N-&^5#d8<&A{}I3kJisUOaoHiS=Cs-ebjQ+{+*pU3JOV(F?Nj5r-d2N(CE zFCDT-b=)^`uCXrYDcph=2Q9>>c}J{G?k?Nq@`CS_IKw19N1$f z?s}oaDb-_y{T-HI-Tra>`g|qWC%3~4{fENhiX(tSLh;t`j@;eN6`)xUm51x|xHZ6| z!nQ)!8I>gWlHsMl9eMsRE0B-R=Qqkh7`7t>0zS8rPIMXEDQ}_Xvk6!~xK*h4JuIee z>cp4Ew#qx1O1t7swQw>015~~U!;byTL3a@S0NGc`BGm;)#2bD|044v69AbB}0h zP$@OsyDgTu91+sK1aM~hFDhA(NZJKq^y`=Oo85AVo_R#_#PkRnvuFdK4l5zOv4NOx zFpZXXpFSjR)qAjB8Hp2iCkXo;2SMRiYx*?FTwXaxm!{e;goP%N5TgE(_H0^1^1$9$ zsInC7miECnouoUw<3wBb9g9aBQNNgO~M12V&$avLQ1xzzJoFE?Bs~x{W>$jk?pw4~`8JV%F=dm1-U}|#1?of<$}68&fu^EJ1VwMl?kEauEVE&S1^8{8<~0K!>_zo!iOF} zAFn>3?bbJN*rgUIowp5FE5x9V&v0J+J&Ci%g|Kx>6y3+)U^~;7n|Jm^?dx&yq^uLq zbxlVx^dxOu0xpe%vO(6`m-emPg0Gc^;Jh0vq&)LWFrH|~kLH^3)Q#~rf`); z<_-b-%+-AENq6qBCkqzENj^d4u3Vsa3X9{OLQQzQ=#vl&-;5mdJ_Gc zrOQ{GkIL_#4dK^s{t6TJ9)_Ui>GDMpp4jP|5%>BT!{Lj6g4EuV=ADZ$f998Rk5e9E zV7@y3IXZ}&Y)9aZyb`+Ns)Ruy`&mBDo+n?8!3dcst_oq%E#xI_UUyf}n=SQouJxk! 
z1_$U_I!%1E&Kb3wN5O|51oy!Nx2hIXhe3axvXSFgaM&YHw9;?&TAEqJOJLx5VA!?hl-!F!qMA=ktrKxv&O5l^-gaXcwd?4_vu2}wvvzOM>DL+DVKN2 zE`m4rOu_G08AKIJ4mCA1uBm-3-u~ssi_*M6k%qHY;2HWe;joz9WWmd~jSx?{*x|S< z$GPaVFJGubu}=LDs_b(`vxn|j)*2%$5&H5ngGlaZV$5&WScyVh2xz*u3-Xc}Q7)HQ zsk@K|d@tnX^FDy*=X#3ypo{m4$77eND>#1HCAzp!oo`8qrQ>NyVoEP%?zw*?Oj{oc zb!W}k(lZWJj%s1mpmX&1#0G9D`v&gc*R#gWF_<8K2}wS~>7$`DHY_@cmXoJJ>d=)q z+^MIat|;|=8xK<5l9@d0N-k~aWb3kHm`BCGe~akM^!wt7D{lPW0m$aW6f*Cbglq4| zV3v58KG-^n=azcW_}`KzF`$Dur$SDBrFrkJMu}@TU=aMuJ}rKT_yOLNf6$qEKcL6c zi^4PG-mGL92lb=8Q6XgoEu5SUN?sc=L*)eY>ODv{BG!r>*Uc04@K`E+&!-IM`q8~5kJWN8(9&l80oM4U`Yv2uzYfm+ zJt97|N`)-<|L|NVNik*92Qh0)dEZ4^ywLJG_}FFPdkuB$YBL@#ZyCvVyZ?vVTr%M2 zaVbmZIfl-Et$;=0Ivg5xg3L;9!q!?nitB65R*mKo9arvh)Qod|9>Rwj+A4TWkm*e}z@w6umMT?@TMbUch zbIedQq!cMLB`YfvmRwN-35`OpZAE?)>=k;`5*Eyfh z`wjJHqIl_0f2KC-8->Ry(6Hz3+_@n<%YL>NB%c*Q3ztfUf4*{6>jdt&N+xV>7>h-h zuE6&VbNQ?V0`sS}23%|H*wF9R2sNRD5GDgAbqHf-R127;$<7b*{I@+ViW~NqJuwnwHCL{*c12 zGI%1m2S$Kb@G)FICYslbkYgWxj?>{^)9Kd+JD9cM9&F7z3DT$6V1r5ympIp6l(Y3Q z?VQj;?*HAS>7KFrkzE$`mkq8aGF00dB5Orh7q)S?z`bR`7l_9t&Q{ zt3JL4qx1LR7;yw{m|sDS=33}EMG7%JDgWGz!?<>**l$Auup@I{_RGZJ#0G8 zO&yKPQe!djS`_#s9HG_^F%)dOlqs99#BQfTQH{%DR1O-4@|#`>T;J6+*ZUidmD|Q& z{rdT0oT;5Sfz&e(xUczwtVPBkFLZxz6s>2v%?qI7o6wPYug_$65hq?go6oN~ zON%7TQPt%u9gv&IuD%Yzud`g4mex?T7<88LFrNK9X^J@lYdj{-i>hq&*wYjlkiC*i zlh&MrsMkH9-*A%T*3L)M3KP8ZT#A+~Q-$S2yW#En!w_zv#w52!vdCyz%vmuQzcm}P zWO)Pjp->W@+)0D##j5DF)&<)|n&2m!K_iL+xWGg$p|h!mE>beIe4{IFlMr$eN`LE* zDCNSwqF$JL&l;`RBXY zN+E~a7PyVK7(SMY{MKN+K|dV&IYG!>U*c!}o5#jDII=;WyXoss59ZyGNNun7@(uRk z=zZuP|F!!%6eqi(Ow?IQ6I_)G`-EM;;E1*0#c)Ap9d5P>BQB~>?3qerpr(Z1rsu=M zcOrZ^IfS0K$gylm4OBQ8OCy`7LeJJ6+_M#$cypNysI(v8wOx=s*IpqSkSjPjKgMxu zJpKqB3wfLx5s$ll$KmBY$&jX%PZrN#)2vnToZ+!z*rYR_miDY=0e-do%|pO2G=_cn ztRXh*D-pWkKKL~~5WjY{(&NwsR(_e_Z`)7k_#}bxp;`R3Dif3&U&>ou=%j-W&tl}J zyKv^WFw@)Wi3M@SEXLD?%`gk(4OYuDzqVP}I%_LyRnDh5Z$Crgp)pL|IgWKrU5|Ue zTeD%;gm*(yE_JWF!Od4X3z?%&vyR!y(5w>0e%cLXrInYd#yyu7ZtCM6Z=A|>w+UR* znJW0IR*(I4N#f@_4MFoNZ9Mj|M$pN4G5(Q|#jzTJ3g16~_E1yit^As71!wh$Zd;Z# z`6OlT8^!d4-*bv1KSQv<1P=Q;3Z<`I<8vPQ} ze%AZ|3_21|y;}@nx>+!O(oTWR5rV7dNh?{e-GzDT1KFmi2=>`X%)wIn~Qi4PUd=LpSmNcGvz4KvKAb*ELh!iV9+9i2pS!_?VB zngiQ*+@UjPE4Y#~7U*lNM0P)x;1vB#I5v2|dD-hir!7@lc#mD7uyMAO-jxl*tiB3P z_5-kL=wWoHam=GA7xF`8&TC|iU|(#1k*3cnI&o4BOG-}BbA?oH;CPOz)w5xSg3#el zZl@(z8tHk&YNp-b#h%VTPBp96l6zS-Y}J=!NBp+1-L($ryYUs>ZF&T0OW%@%+BvZN zF^g8tAI#J*tzmpjH+jn*69r${1}-1xvD&9`G`mWUWo6xjI(ID=d$*mvnWSAm#v>X_ zrcZ+N-oH4>4r`V=cOS(*P=&8ao7mW;R&nVv-(`=ikBrUYG>Q$Z%> zlX1t9X(D5vM0UEN8cx46qYFkaxy=P@abo`w8hbk)ldrpAz>H#e>hm9^T#R)p9Cs34 z{iW+v`PoJ z-(7&Mo76CBTezq-zm(Te`zf+_t|OoKgRuJA6ell%Lkg$9*G~zQrYS3eot3v&@JTBM zIX4W8=hoS@aAnWG(9Xa(-lRX9UakDf->yV@yF0x;=ejyA`E$xb$`0)^WL)Z9GJ7nIhd;>){j%Gc!|B-3b!gm%-gx4zO-t2o*mN;nu$47}S(aakA>-JxZzg$6+$;yDp96ZM#7B zj0a7#8^JEDoxvtvjN<*CMnTN=Ga%Zx0}Ee&;Y+<_@z!wX7QW;eSQ#z=zH^B%lXZfrV;)lJjtsGhxjUUb zU<0qGDx+nFJf1Sy0$IHinWNkvQr9b|-*=)hN?Z@m{1$SnOTr;?>v9Ttp~<$os!+>z zvC#93;Pe7M@JG!r!k9{Tg0gn;%oHtt*Rnp5wYHGY9Uz7Qtv5LRsdqBkz ztNHMCPE1nB$ZGlS<13RUu|dXfVf2H^C~4G0HP5m*S4{_2aIF`{U~6)EIypXb2+ zL(Xvq%56I-dVGRiOb4~1EpZ7~PZQaCy6G;5 z#V;>UHNP!OEExvG+3Blx!W@;_#@ieuEUg$7vNQ28K^IpU>S?+ zK{d3V4beS8ccu;HX9(^dTi-b>P3X_YEP0R8xpDAB=wcoCI0z*szU80&w!+BA=efXN zOXx#i9b7V~W4$NM+4W)`bXKl{oRp0?YTggR;9{1Q+CtU8HJQ2mVp@660jv4xOfR*I zJ06wI1w3lP`;pMsO5z0zl*!cML!J1=TbQ14S&H@+YK8QWl_&e z0~XaS3+J(%!UHtefT!-Tr*b=cAuX`tJ{;pT-3n-2yd4|2V-0IFWVG<(Gmti2f%gL3 z*!yL6_(y3lEs+T3|9#7b-Px&_yD%IrO$JeX%x!u(R2#*!)e#o#6!>#eENW;NQ$J^f z3;yiH(+$F1;gu;FR5g-wwyMzUe}z3k>!8;yk4thK1N+AfWBI52aG$9kGdz$)@3wl8 
zchNNVB&Qz-3wPar@$q82&ki^%C!L>Ec9A3=@y;)YWPt2VKkB$p1S56*N#Rqx;N8mQ z<2FYz@%l72`?WOnEcC}?iyhgYYiD_D*9V|lb(m&r-Xk?bS==(}Fy3ExkuDUg;y5dw zws)&wTfs)w{qi$-lyBq@^$kL2hh&KRdsxTn<2gvUONw7k6C9$`+ZP1A~%aOct11XWopWU)?g4JY*WqF?0~02w01GKRqdIyc!;QmcU&o z%7h#&;*KuQf%eX7*5;hZj!)^L_$WD?a(^=JF@Ho>|6G~FD|!5QA(ngFu#~O*GK^`s zC)2x1Gc+10>{Ryu;l|4L5NW>V?ONw!`*c%gVfF{Kg#B-+u>B~#wU?&Fl=0U-^z*Nm z1+uW`fi$K|5B$%3pfjl&Sk0;8-5DZn)Za{^6}0(Y@wwx-0DxFoePWMmSGp%zETd;4gz#Rf%XUKBe{+QHu>xAg4WP9>F7V{pfBXP_ zbxKs%gIfQQbW6{keY2j&S`%ckcC*lpebhw{ZfmdQ^94?{@-{7R@1`TmF2NR; zNE)3ll>Q};V9@m<{$H=qjb1MBdmY{iEOZ(6aapl=rI9aSu`YXHl>+k(dN?#G1TNEr zy}q0V|D0}u>Y#n7($maU2xpP}j5a>sek9Ycu*0sD2KxM5@Bj@e!0gq%Wc>w>AG z)WbhWu}ydf1l@tXvr_qR?M6N-K9BqECCgTOOX1jeF1$m9I(8jZhNM^%9Jv1kRs4v9 zwJkqk%+gU9@v#$bYL>9_#M9vQ@h;nYffF1s@_4+=4{sd1N%S^@E={(B87pe|lud$f z=jw61ki3Xn*<8iic?puY(_>-Vfw>kP;8t1`!Md?mxxPpX>^xCJXRU5=tc zd?HC_BaU$!H~W*ylq2+0*gacJ7zU<468Vn>qXbs>Zcge+1_Xu3QbuDUJX=`_MuA;o z>$=(C;Qgkq`Pv(@+9j}BkFBDK{RiRAUt!j_W-nOH6S@<9*7)Xg6s{kqj-Rc+V3u(d z90<%|p_1v`(F2v-tE_>jxcnGRYLREBp3VTT&hc!4oj%27UWKa4E1#r-*_;qf;Mq;dpIxGAA}P`-}vBv1DNuM8{+AeCpmZ1aV$S( z6N{K*iEpfyvR5`qq!X;m{S7u?+x~jP+lod~3;hPYU8XQI{}5{xNZM`3`nkx0PX1=l zKvp)_4&07q;-aA{Y~++w&a6fYM(MvNiRGKHrf46Qy6S^v!5!+>Xce4;5BNEErP;$j zSID|*7WZs)EI1X6Mpvg6diZV^^UJtN!(($n&!+?~`e*US6Rz?u8XG7uTT*0ow2*!e zcE=T;CW7U<63{ev<{e(CVf_&cNXnam`zFbAW(J2v7x6mN8M2%1DH`x)j*YZn!UI_3 zdr_30G7eAp)X}k}QJ`}wn0?C0fJ=u0Fk!JGzD+;KlJDl@*Bb=N$Ct60vckT)-3z~E z7@(cy64)4Ci<73`6I}0YBs;SR6l@-2D$U}2OP7G>sloWBvXt7=7qIYA*T~{&EGYb) z&aSx!^P1Z&aq{Tc`Yi?#5PMpIIfYFmvnhV)f7u0hT(}B7PapAyFD9_yW6Q{fcEG2= zVw}5iIp}&c(fzR^)^`87c+ylaw9^ss8xy^l6bLT#1zEgYLaFH9Br7;J@gA4GbOO!x zZxkFLe*Buf2bqeS0Zuu7kX!rq5p$ki&ED8+u-&Z!E9?4EnEfP?Y+n|N_eG7xJzILg z?uR_M2xt2E-=nzW6%vpcn}T11H1V&;1X1yebnb?Z9TsU$#Sqsjc-vIQ&wlJcE_zeg z$?*4ZWosno{au}FvW#cP%eNzph!Lz}k09yMC-^E;2;o!OsQkcFdZwz2I*YRDbI@MU z*6`%VPm%-eooZ-VE`h(#$}r=X1>Bbsby(o!if=+YSp9i{UA;_+jXRkH_t$H&uGCn5 zn@=zNZa6?8Q%}PfE+2k=t*1rK74%(c1x;u-BKsK{aC=WYJn8wst!XO*{k~9IFewFE zYBk{Z#=Y2S7cQz3oSx;j-E{twD##?*L67Ji-@f1wZl=AkM4x&*pep9Y;$jsjm8j` z9%l*@>kg6A+#9s>RT7=KCgg&ZKbK4@RB}oDQczMA~#eHxz zr4+aWzbSLqh57==dK$5;1SW@+igg__nat1^G;Mzrg{gb;pDIr~#~dyKi_kF`U1Ed{ z?hnN#+)b+3C5G2ZBiIXTSN`R5B^LF#0WLgL$J+KRuD`~Yjjx`Fao)#7=67;L!#ZC= zT5v2i-I~f$ON1_wW&*@rONNXSm)N&F9oRX{gE_oV7v=7_22skc`0e0I9J5{>W49EF zc52$w*DqK2_bwyR@#zLwR?-9i=Zn3~=u4arAEKWmtEy zn=U5FvFN>$%&;Jk?i)XV{|;`4nrWu=GIR)dWqZKd%0QfaBO9AH9i$h>!!YQ^LX4f) z0ts&0@KKrqo_Vzir@Oo0Yj+8#`ZEK%9>v#p6xy-6ZJQ{w;xI*nCwBI> zKiU1$!*h{>6Tnc1#(nw(D;MpcXsOGnGw}^cY_g)wy{ z{QaVTeNCbb`**dOY?D$DD$O9fMVTG{EnVBN*A)}4dC`bHIka;8KTdz#Q@-cz54br; z8){$hB5k(?xWnQBKS3`UBOCi^xOFNY6RZJYD>`{&*-s#OCz`(gn1m^6v(SHK0*iRR z4hQYkW(T$x0{g9n4=&Zwzs9R%lQFA2(ZFuBVNEMQ&-fVxdAezV&K`| zWQ340>_&|nLi%Yq;I76k`J}@KIqoKzz0xdBB9J-k_h9Su1Ihe^Gye3tDjuA^gjLBd zU`p&SFTBcVTtfw#B(&0E|7_gSd4c!7*@CsM4rKVn8((p1Z23Mh2Bg1)O)ox*3LKi* z$0#qpphM7a$?CASh1Y=-GS~~tkHWK$TUq?x5Ei{!AKrhoVi8k|_~qdzp?{)2Q~fN) zw&q7r74VrBID0xx=ck7zr+!lG3zWE=&bciZC zsXZ0h%PbInpB`Jk;vtw$9F9*qrqaX6N@gkFE%3P~VAhLNyccqb%1NdEY||9nGGjVM zRNjUE)VJd0cX!}!i!QF!8wx``wqjF{Jd>(jK_4U{*}auHu)l!=+}EI<1o=e56dcj(d&{n{<1p?QC~VKZe|*oY;k0FGESpmStfP7oXNG@ zr?J{@;rv;!p3fb;4fj6PWe~Agt(|_9R?-KlV~k}-8mf#5TpZgEEN^xh$zJxb?o&OM=uA!PyD|OZ#{jSai!KMf2S6O;+=zru)-j3j2wt$Uiz36@Nq~04J+h0q2V~ z*y?vrAZf`!*jzmjYg~@Q{3qY}^8$VNPvbA{>!yBqx6uuUkBLRE(TA8qZ9E$3tixN~ z=BTzYgmS)2X4iHLh|1T8Szx>y25dhCoqONYewvHRM~tPW112omtpQSvzHt(!Bk+ny z2|BJRv$SC$q*{D}c6KFZw4LV5-3pd%Fuo>cZ#Bm%y8)cNuf8#+#w8tZ#n^%5!P# z0W*b5Qj$#fgW%=LI>HvZ*s#{6ZJf1n8HQXb0ot>kZFQ19kE-&xV0s$6A+;B$PS)dJ zchtkgqC9GNd6b_2dYTU9?}wW6bS6#AdDD&rF|7Q@|5Bj9)n!T@-(F8$0$< 
z)Q8FJT%iQ^Dd+IpRx7gFpEj^1%Lq1J@#Vzg436@v$bS0{?5sYBi-e9t`J>TzR}@1N z)S8{w9oR>P!Ht|_QYyWZye2w$Yapr(jmJ!9dv;zu0bF)xQ-kh)+_GaAm9_~v@RmQM zB)1%VD!Rb%_c|~he~98;KheL@QM|p?G>{*aNJ~3Uz>nk7IO+fmdq(nQU;m@0g)+>iubI-SqM2@EDW0^NfV|RRc-H%# zQ+P0*ZJcnJCJyWcrOOTcLH7+y9z{nf3F| zT@kH#{}KZC%Hx=qwQ$cshskW0z(|eR=1AW0h{fEL*otUL4n+=K| z3birAVSwHOGIE?n7k;GSHlt5mvRntaKa@o_bsx-_7>$`}kyK$V!{xo!rs6^=8G?lHqdj|T7cR-k@4{g6=#cl0b4Biqi z=-bAdbjrsR#;@#$5ojp*Yg2Lc(=m|oPYA>wI0RRf#avQ$5bP}5gc)O1>qic0V1=PE zWRP1#KZSfB6zOB}4>wN6F@<6*Ik>hbnuaa8&LZpvKvDe&HvFF(`+H*`TAACjg>n0^ zaH=rROYJ1xRR)xN){sqmV#^*}+Q_!455)cf@#078ztyibm<#?!2E^S8BOjLs^z4m4 zuf5C&$FU#4%m?A@DqF6&`YugdIv+pVUx13~NmMd^BXmW55OdKdX9i-`IcbjEuQS;3uCSc5;WI^xmETi83j0=l6h$y5?MX?4MG z(W7VYm`TJDzNT*X|L3{X^S~JYTYj08AGky6wvXJyCv9|Oh&E(D5Il}=vLRB+75#ln zgxSnnUg7U@et@YaPE2dz2h6s`r0`@q`}Gv8;aq6>7JJYidw^WqqnLB0FG!ev=4F$| zu~BD2V6&ML^ShNzw_i;l-#&TFnyNss_AD81Rb)&1EP2D`1ZoM`%^w*d&CGpGnQTQp zzhRfai2Cs#_pxHUxJO`QjQXAkgJ%-GSu>oa_3q>ARHx$ZyiDHoRTgv|?dH6+uh3Ec z3*~l5vd}_-SvqDo)t`2!Va~fL zYhG^WVJIEa$_M}5$Gy2IOB!k2G{;HA7MxOrYwO10uqkGw*dai}4-IB_MwIizVr!g>z&PX_El&hYy{&=MGam8(R+o`Zm^9=FXUP}M z^`4r@d7tE_Hl5*=y6b7N$d+ww8N?>9Jq^hr=FES<7)+fL0Wr3@YW6>OYJt*(%B8WZ#j!+Z=ZP@kcgHEu0QXFTrybS>nOd-#P2`Hd9_qCl_d}jMZ9S z#YcC3g&ArJm?im&ZPmTYOJ?+Pzs}m=*OCf&cQy!y8=WST`Ke4kr&2t0o+R$tcm(#t zVf^>`3Ji>sWYdh+2t0E$l!*%#vhrhdB?jm}ccf z++M1UB^U0}>4??nQ96?CTD+h8UK+y|DYQ|w(sEoIe-;7~2eG{y_P~#Z`&8g~n2vak z#n6=|Slf0AJ#@A+oenG9B9+O~za%p=w}tG|rUgu*{Q%6-xC_6eDrx<~TQJ}AD^x`O zfD6A8cte4`?Nas>5+=rxntLWmF0sa~r-U7eu`;L(oPypZ1p+^2Jy^DncaY=09dI+qotK<|nmt%YJOZq60$T-KCS?TQ(e)o64iKi40Dx+rXB$jHI!lCEzpl zIgHZ0h>FSgIhCXrl<(*bJHL8i@1z&rPsMdhF>aM0gaFliWZlwvgGfDK*MP$Eu zDe-3?a=yzt`23eSoNl0t&;?$Db6?HH8{1T&4yxdv{sQpQ^G4I~ZqRvnftxh(E!8y? zanm=uKwPUH+wjjGeIjDOI)6Xq3ofn@qd@G?7|D9JXo4bBMK{+P_`A-G{q+;}emaeC zH9QA;1Fmx;r7Lk-a*wFSZw)MYGLog+aIEQp3gGN8=9BUlHa$qkh7lw2zeVv>ayfw= zlQ{~G#ee9!eJ%4{oekq8O`-m=4s$fkVa-)LQBO62WlKHc9gZM(=EX$(btQyldFdkP z4noVL-k4wNgR>XS#OF7fV0O7b#kpDloE^$H|ILT?fH~B#;xR4mcV~NgtfAu6JSf%+ zN3~D;Y3Pkhko9Xn*^EfVR^8`RoLB}17NsovrwXn2X{NZm?L}M((1pov&%ouov*MXa;-i8$*7&YPh$t7G6G366QpU zusF$>r6Gws9ftDw_(K<`c0fTba!_Sxp^N{&8YU7tHm}f>)o_*l%-3 z@K2Lu5s8}kSn!b?d25P=Ip;xV$VPZq)C@QNb7v{}g>bBE4E(;F%l|j=IApl5L9@b4 zS{3sU694F7-M?GFc~^m&#XHyiP^c zHap0vw?F2mISB9jFGa9$iy?C377FLxA%1L3jPqnSGqiQw&)0`ug!dPJ&={jUh`9IL zaX`B(|BhKQNmC`ZJz*Jhd~ZgdiZt=IRX$jUAESYt*SSNkh5}=*k-ma=UGOBV0Zz3G?7Z(qu+Pa2`@ftc@gHB9IWw1}nqE_Or8VUQ z-{hTL4w6CG3iv2gBc9B?N3VS<#G8b!tM&HlVBT<%+abS#jnVC=X1;{8F=*oI z{Y1>vdK_NdP{M^S(!-T&H1WtaJJ!)?OHQq`U{FF7$i?X3W9OIf@xvS;~te&>xqJiN>*q$Y&ShDJq3@X zG9ht`NaUAWz!@wWj0c+U!u!Jga8M_QFXuDqR;~@g=I<0_Js1_jb&-wFX0~C~@Vrcy zEbg!2!`hrFar0r2@3a+q%Z^~vT1tWTDq^iZSw6%k2}ad*h<+`e1xr3$BG=G!^slCX z(n~LJr$?&tVZUwzpI!sEuGq6_KQyrJrYXF?oyNI7jgjXlNP< zcc}O7W)zH+Q##I-!K-=~!T-0MZ=5UUuM;l$)rb${9Gx;NAmaZVX40Rar zil=eG_9Ru~ElSBg!95o;>N}6;a1SS1a6Pt#P_%Ly>(;m82c{7jm&U+@%%k-I-VWTi z2r->JIS@Bpv%(%>*L~mSK7ZpyFgvE4ip6u9$>qUZ9J%E=EVVGif&(Ee#6+E4TU^c5 z;wrhf?s4?>R3~hm=_;P`PK6#B9Tfi8C^Lz}uAr|+RDY(JDa230`m){3mLCakR!w3j z#|q4RVV}@fc9%v8zCzXu(lHMR zkeGtYSDl6YJ>O}6@IP$$SPW4U06%gHSQ&Q!hnFltV}av2pzs2>-|rA_?)-q&9F@W4 z<2u>wSBdP%S7YdV7btL}#+!prQ*$?cz(g8tCVu5n%`cf3z7}D>0FCH zjcQuVAH|_)q^ruN$;-mem`>3ImzVs7ziaT9g$kQ^JryhpGGW(SDI6GlmEJ^^(CPR8 zi670CV!xf7*@zY=&cQ#9U!>eja=|l61}DLP_Kxu6?G*Ol-a@AKQs|h(?n1S@9+Z2o zl+%w0W|g~oz^Up0j&6BN5k31LB~~4dYRL+&*>v<1u3Uw$0a61-;LtzVw@0(_d; z%gZmZXaCu$)3rUbXhr-|9KcNRdf8NVaPd2M-C>OCHxp^?8J-)IyM^vA-^uO%)J5rK z0hE)o0Hc{e}v(-LFhk<1V7Mw_(o9q|=qm!IPUBs;)Z@*7Q? 
zm=FCKdE^l)OMI3N|K{-~=y-7jwmhFsTW?OK7SDCG;M5Y>Y}?Th~a8%?94m7{jW6AvzNnx zolb1E<54Pi;vjnMLU8}uOy6y0;q9`Q)CoukA?;9Lc)E2XN(^YGN<-nU zqTa--JI3K=7boKVn(CFp2b~|LU?*B#pM+IS6WB~)x3%NX9u||DMn-PwT!2b|_{>Sc z4Q1d$s|#0A?O_L|Vzruv9FWHQWyLg5hr4|HgifZg|9Y)O6`dFsvNEG-o9_=L+? z6upcUjJ}NJp;~Oe=qHrgZ)7Dt7W|;_ZLILd6!tBNgEcKu{KReU=+Xa1yn2^9{x-V~ z4J)-63tL7%n%qezH4RoO4q+{ku54`bIt*OB1HJsWlkT_*8uRTOjOSaab8a5#M3|9b z?j*3t4@Bz~qp`PU1sytC%AKy5!)mMUIsa=_!p(ov;h?U-O+IJ?_di_*5B00C`}A6N zVuLkzT$p>;svf}sLZ&bB&^mHU6&Q*FYi)dDCb~OM;NDG^!@!yyc(x${j2=tClG=CV zUMnm1FTM%&Cp4JSD>w2k+s|HnU(aGCX3~j|Un#YJC2p8D6Z~e>fM(G%k#4O!8+9)T zM#Ps=6Z-;A6a3ji?N2c1hYh@1;|WLI2Q#;?&%_d_3i12W*q~<;;wY~Q7Ls5JT}J8R zjyi#>wNeo)m%M~|xArrg@yi4bS_&MSvKuGwSqF{BFM-|eT$t!7-21asSkSg5xK3gw zL@tOSkCC$M)%#A~T%rTE77hZ1DJCrROb=c9aR)B{y-C$%$KEy(7@I#M9&3{U2l_Uo_{DT=7qz6J$o9C#YbGMAH+QSQts z+Vx*I7)3YmKL4K6vzu4A*$<^bx$_`z+i1yEmj&{BMjRGjDIQO@%bvrb-GU3&paJ|0 z<-{8{OM=%geJq`N7_No&lfeW%D1QBdUXJ-oZS(%|mc8+`ZrMUyILd&xwK2g>M_WEtv-H}*Ob*4Ny@Ck zjKWE%Fg%zY95sP`oGSsr+eA#O_5c_NKEXYoOtFTqqt$KpX!%p{74C1QwrO)nGvFs@ zvECU}hm_G($KC9y$qBOIci|-2Q8?$;LUw;~4p{}svcP)Ah7CAKk1mcQi3EYIk)$ww zbw)2_XG$^~sYJB0@n`w^modiZ3($*7y6qFow%nM{DrDu^^%o`>^wt=zCqLor?=NNI zY&n{nnhN`;8*^u+w~_1Ccz)K$9*{kKl;34`fFCi)p5+}lLoVA*PRzqFTPaq!#@R-^*~Ao^hmOG2 zr&B0ic_Q|o7|h*O(ZV4n?>G--!F3)c$5j27a#WRoPqtm*zYje_=Yq>Az$=1HGpuEk zTfD_F7u9%;zm432``dBpOh;y_lLcdi47K_*^di@Vy;%q{YgU=hxKOkME5-C5iu%*%6W{u)MuSNHQx z!feCRG#N|8x)byey|TJn(ZC&tX&FUjv`v zqw!6$CiF%AMG38d5S3E>s_)U*mOIdy@py&ln>vT=ZM4 zE}c(Y%_A}yJ z)#X5zH-iG3*?jT7C!lY-i&YKP!wion6&TJp^-tG+54UBNks_fRAhXxF1t?amPJ|;=*QsUg{E3WPm^aV88Ij-n5<1I^4ND6dSH z+w1d!zxK_D$+gDgkh2xEebXCi&7H^POm8IdwsZXRDVy1d>9VZ-&}MQv`Ap!nr*VD@ zoUuZ1Nl3aS;-JFO>_U)$y^USTtTtqU5JY22uFc{_J`cDn8GoVpdIA4ztw`wlU%~l1 zJZ!D3tEuLeH_B+w$A{aDF*ax*VX`{ic=SeOpiu(lQ-%5e{zQmBqQU-#KHv-AJcAOw zApF^Pg|hM#*xS}eWb39x`{RqDPc;kn`g+3fT7>hpwrs@Db_~4og0w>8Ao*Jsc-w8j zak-0`UxPIToX!##)}6qiz7zPrLjGvX&}th0%$`Y2GY~J{Dvb_a@98dEPmZq&;nC{> zWG#OeWdF|K&m7;u^&U7$9oD!;b z^lZE-mOgaiRv0{jz+7RMw!jP}v|ouk{K~PQWB|_Tc~7e@ShLN&dC()AXO=COz+km^ z_*DKWyKKG^>`Mo;A&)Y^w>}!APNkvl5f?V2NE@@iNU^~>ODMy#mn>Bu+`E@rw^A^2-WnaDRiouCqn2J>#ieqJ=}D7vJ@8B<31k;|_)#W80i;*wbyx zX#F}L@{~}h%=fqAZ-yy~)88x?dWNp>?&%7lM=Lng^TcG|bC~Hp(5H!OR4{W>J<2qj zlBmX*uAa-~f8AKk`uYR7(kBw|_s?&BsBI05s2IXdjS!sZ_aa3rJ@nX-{&eu^(1H~c zJh86uAe4oB(ADNYFv>6(K9=dxjl@1!Iw)A#3gm z!;L3{X{xOchIG~NYYvSF@I`QCp7motJ5{lxFp+(1uY&E9S3_{8CjA#a5*ins zhuLw*>H0-$CYLYdfef|T+Z`(Gh^!jMr#W{H^I~92!UL6v37tyX?vfyoSlxEg{=0ny#g8=ul zAbspR1kCwFBe`E(Q*94vtGM9Zlb6}W(MtSt8)Mch_=O#Jy@VC(lq*NdWYEJE(wyrX zY4%I+6>Wd9k!ihH$_6?spv2k)c(LvTn>DvbJQQ89_jRMKL!uYnS!oPu*+ZG~%N1fH zWk=@zJ%ZC3wSz(n9O$9TNtz}%5$BbSgwLyo;M#}*OuS$#Ee_jE7Ya8szYQ+<)W;se zrv$>L&KjmaLGUK`cR^K`6I6SzVhuMea6>{2Y2Jw^CF!$l8uyFukeY>sasTMw^a9*+ z;}bKwvRv4+YSNn(^6b^!G!}E&7+00wCGV~XY`kR-ntn!Xto~g%&vLlK;wE-8=%PrX zWf^OFx)goyL_wZ<3|>$ZV+1TpfoO^=rob_Ve5i>UHRVDlwEP+xtG;n>y zB+$xG!kqd5rjk(3Ebh!lr=B#**=+$^r4GVYGkJDK^%N$g4Pynze+nH+rWI_#Oj@U+Wm6)&x4a9FmIUy&bA9lLmnsf+6pKw&M`HLq zB`DC5z&Xi!crRo-P06!h+4~n^vUdy3zgEp>E=Z?qjvlC2lfbqOU(82u5IBvpX6%l2 z6bl^uQ(XHxjh{cVn(poS4ter2(AFe4q)$e%0I34Z8(mBFqt{`gLffeo{DX zletK%T?xiZn=zICT<~K91U~Lo4l;_Fmv4_)>)$NceJ2|=%uU&cT2NAHI&3xmEy@G>W8eX=|M{lR&w6VT~<|dwI*X6yKIok>sf*IvbEW#6` z<)+%)EuaV4)1XYq@(h}iic;AgG&&w|NKY0PHZ;^%G;!iv2t=K%03MelV$MF!)7uWtcnBHD^pU|Fv^MOg4gOV(5Lta*Za9a zoKgLSwI^@H;NPF&uu?pKbF&IL^gCkuLMyCkaK+)5{ct`eLQ`raYU<4Z<=1~{MOp&p z@0$-*uY`9-`5hQ~Wjg-2@|(NXvl~x6K1n}soPuCW1xk#G&7!Lmcjh*;lXkaTp*1#UN zOn%B&XAWaWYO0|#;|@96PvT=_ce8IL%}iI=$MWHu*eq{THriiZr0}zg(!;wk`1EDs zT-xc2XB)S8@m_9#%^mpcCBa$(m05vX9s6{0uejrpDi>)L$vI6K%-*P7<(En~qEprr 
z()}|P!q3fTew9Fz3?8xv?<2{|Xe(NjH-UNQMTk@$&&rf8lGpb#O88G1+IO4Lo04}l zWvv7%S?Hj0QZoC#LtV5#cO5>QU=PZpV))(D;^@%xaTpgj1J4bA1`DeOurVWT_)S0d zL)d?MY>;O$SC#HddHd4I;pbV-;*}EH`*IT9$c#guzqeozH;W=(#B#etjubolDs=Rl z;@BOfurTC>*e`t_rq&pW9*JdXRc8^!9^zQ`O|S*w=vJf}na)Vm-)&Avo?p2Wk2FVM*s@?n3E%xLRt+_dSWA4eD|@Z&xSl3b{su zDu(jruC}1Fc_i6#8)(tI%g`|MG?=VUV7sTOGN_gcR#tn6V&-G`2PC2;>ZQV12qXfjgUzBdinH zul?HCTG}V<;w@Lxf0Z*ei=%Yz4Br)b9hcR$P~CzIJP=a_%OxtIMPm}ZRoa9Z z?!)o(wB0nJbH7;js0=E99>&t{HR9=;o}9Fm(5((S&)TL);;4}E>{H)zUdwn3SH4gY zM?_b{b-MtNS7+q#RAOrF_A}sb*TDH4%EWLtSLR}AOe!OWgTs#u95mAjZY>mM8Z~3N z2=2Ui+16AHo&B8#UQmYzqMPjW)dV5`;=)B&*TJaG=FFu3IGAbLjG#V~tU1EKCg6Ai9Hl%AUpgS8_h@Yq4YVY^SrzlLnZ zB4vH{sYV&2Y_33#=~{d~Y7B*&C1Et@h^GTr;*#zdxM!=zBm;b)Mt2-)eJ^6|J;A7c zMTxd;3#3zp&gl5B4xaBHkNQ6)u;igTd;9!2R=E&aNG`#9K5M92`VSp=oJkYv7BWxV zOwk)gE=dNJG7S1|B8}AwO;g|n#gM#y6?bkw=#1BdzM^yAHScBWK%C6W_8-J?04xRv^`VE{n_(V^f7BEz2KeL+9?@y z=zu4MZOY(UwoG78MkS$4w0A}N(<~a`HeJ|-{Xkp(t;ob}t@!WtM0EZ-4dnj3hG{Dg z!R{k%aP7oDu2WiyS*veB_s9RZD;jDT>ym;tmseoO{s}mH;3`VDGeMJ#WpHQYF50T1 zjH&Vmn5X+7mRXewv%h)M-9JWbaN&Qj|ND8!UT?!cJCuMwja68;dN?HA(?Pkk5{iAv zi{sxe;xa0=MM2rc+%3NW6kwDDvkPvJ)VqFq61W%MggxLD5+%?nD1|M1CWoIr2C?lG zJE&^qcdAT0NgA`R^3MGe`3{L zbNU1}=@4Ea^cH8{WsC1$cw*XjBW67IC|%WkL> zC=AyNf$bxu=keq{_EwPRG8@e-DP7)-T)a%ug>8z6C~ zp58Chrgr}RUYL#q8m?jGI zSq!(|2UJ8~K1k{={w(5wG|T_9557FsM2m~d(O|X_p3DvewY-mX`IiBAaUqAR+f78{ zrjEzoGySY4J!ZPzhlbqQ~G_8EmNQ)e!7e(+MgyG3=1X3S4( zINBKqUFUDEX#JpzpD^nwNi|i&B%P0tJ~9TSFOFxM#%i)FOV5~y##z2*GzxZVz>o6^_Y0$dMXPO?yBA&L|X#0;l%lm6d=s@OiSDO zobM@UVt$r2)+wUot6HX5@)=i_HIY`cE%&KY=!1l73*HrfcDuuf-TP>QUf<6Nj*Ce4 zTXF?k=N%7D_A@!_y-t5tQ-!la~e?k*uj$g2KiLB;E zR#-DdM@_O5zPplD>f*HV8(`N%fn9Td92;VDfZN-?o@ObXqsr03EavuoNZm7?td)1d z{8y@Io@~W(`a@Y}OE(T)Wg_gS4&wXfkt|yH%;)&+$D)T%sPSs1 zaMOdjo*K~2EjoCAtQop3%H*4Toe@`O;^QCc_@np|nBHE7tJiH~hQfV{-|>d8bbiQ5 zsO;gNZ`qF0y%OBws~@=%jep==UraU`hBP`fooPH-#b*roLLb_Y<$COeXKS13;M%90 zt@|4oepG^mzP>IpkUWGNWna;2nR0rpQ;j>@;;DN^k?=k`1Py+z-1F~@Zdiq}x-(B< z?aQ(B;fX7yYgNLxsB}8~w1)MUda#4uZY<3t8oq^nhYdaU7}8|J7602r^NxA3&$NX? 
z-p0Zp>l?hwrM3A(RgH=XZi^S_&)OH=R4(Z9=z?I;`Y z37P^tV4}SOb6@a)4DQE4)r`Aj<#v;5eI`*}@C#aeJrTl|3Fn((bD?efZqztb2nl|N z#6F27{LxcWSjW3KsGl>DN~;&LvjYF+#Z7&@Z?%aFiw%H92c7BqK|id~WN^tZ8|}6y zQ$s{0{kk)W{RzJVJ0~WA+(mhkc!l_aCGgrQR_wg?WVGowWi$18@b&)+gPsZI0@DTY12OYru^AKc=8O|Cdo%&+tvgZq1) zi_TeYg#R*h$-~W;)k-FjyY>QHG&HVaZ)-ZW$5p_D_eUsE@gRJ8QR>&V-dWQObS>XQ1 z!|7gp1N7*uVzMg-PiApH5yjht_p6H?jmzm5x%AwJqfrTPzcPWYC%NP940mR;^D;!3NaCTMv22q` zD*U&un6Rc0g8WCrrm8>W^msUQ_=iH)w{NhrC7R_Oh=6m;t+B>z18%$ihCi8DBwE(d z1`dSGiWm(L1)x`5LVpwh@iR-3ZV*icKA+;w} zkf^ADohm=UK$MPhQ{rjXTRWy$xd`pE1g}^|E!>zJia`qAY@O+OzE|%r{0a)ek>OdO zdnb*#-z$W(wU#WjHk!(A5nXLlAd6Nr%x_P@{wY>G=5D9V&vEeSL$}CNcn=j$x1l|+ zPLNMEFSZb}&ZY@B;Fr+PYpHlGG7XG?;Nd55js0OZXbhmYt{i_2uTQwoYR>))p_y}&V z@1T0vo3Hg17|nj`*pyXkN#jfcTchfZo`R=7EASHB{O8GCKJpPJSRCPHx}EWQ`BDDc z8DU$Y>C_eU8+3gqyt zz*+HMD6q^#xp>GuL)bY5qiEh&_}(c^1HSLcAnXt_9umJjDeguYe@*5b{n>Yif4WQsj=we$#qHG-7^&B}0BuV+_adAN zI8D?X?Ty_D$}IY4tVr9wjh+O|q4VKFNcik-$n~$J;`{gD6OLkTsY!HX)i>I?F;(pE zJqmBmHN~zU@-R?x5t_Z3j6+HTn)Gaxi8aHg9EDRvhMauSU_|s?~67 z?j+V^nu72C=8|#1U5M@(Sm_gC#q`WONn`XGGTEiT9!6fJx)J-tZp|Lpy>}Sb(~!s~ zxo*SrAM%;e<_69!=`^;4JrVT?W&viM~j`>JaUpBChzPhk`cs>m*%Z0)3Mzf$cS2E+XA-i7#^r@_Eb=$&V^1`$2R?%#aqm3rm&$_)L$os zRza$G&7v4JI&@iL{&@cW!Vf|hSdf=YOJ%wisgQp2Hy2WtNfqA*u$em&p(y$n<0PlE zKd+HlpLhm2nkT5-;3l|+RngGe<8)z?G@I_)%O&o(1)d8#>CewVnyI7-eTmA<$=ZnR znrnqFp-0e5cOvNJSo58tH*B*>Jxn@u1yicz(J!bfG#w2HU3Sf++SppK5l-Mfd%x$X9QM$ z9L(I_*725)?6L8F4L|QfDQ_6$!9^L{!pX;sQZ=SAmoJXg`@|Q#&+K5`;!HMj;9MB7 zG!2}G%EMXjG-|GPrZ;1pz+b(Seh9x=_j=aRnA203Q_C>EGO>Yn9%iKWFA|&{zktxE z>D)cd-6(O(pXoULraIeZxN5wMjd{MFpZ_%-YPuQt30>8Gi8jjaQ)QPE9oU=kpZVp{ z(PTYv3mji(5ATdC(b_o{PSwvqjmr)=e7+?FXIR7G&o=Dk#{HmhW(Zr^;75lP8U)8z z6-X9-V&;O={O6SllBy79OVv)KBEJ;e?KB|ge1&cC=S#fheG}Y1RGQAYO0jMAS*)`~ z6Fm>U=EDZ8MZ=AIaqIXj7&|)<_H^aLw~=GmVy8&ze!dK3$LrGlhG3G4GDP>+vdB>aY|}+yqP$IC8VkyYP*Fo2fi>C_8QyK=-2FgU9FR{M4>rwl=p? 
z_`pJex%lol^N@)H1J%FuV|WyFJk~(Bi(Q<{;GxXe_Y9Nw6|pKSGfp8#sObE7z`Ll# zbGR^CywUA64YC-HW>2FqXk(p_KToIGw@Lht z#!%1y$Z!=H%cIyGi!t=wFqnCsdM-BD@R!>nwHNnJY6|l2ojH@IDG=D#!`rOZ zgfp`UB=zFa{lH9?-KB=c+R2odZw)P3vpLC#X8wqk9|4c&T%aji+hJv62UJQ3I~wnJfoC!W zTmvLHNtrQh>x?9Pv0gaSzm9=sEqUg%D+2;bo%xLK<-|A7v32|LkfqjrLM$ZW{)cNO);F3vWkNbtQ+t{OE z{q_=^3^)P)1_j(;0~s#z#$ft$^ezO-+@~cQmg90oFBUfL3QTJc;da?vg%N@WIB)E7 zOiz~L8-!l#mfEA@(?ZAJ(M2D@GK*`idkeAG=iyo}CkVSIbZ)ZDnc;F-xZ5-m|5G%E zcOz4yKJ72nQWTKz8LAy!$JKM!zrOg9T1CC$M+5 z{9T}@Yc{vW+LsqcxC%3t7J3{siw{*RWe3%o`QU|2cv;)iSRW(2vUfc=nXiLq!rVY_ zkQ0koCt}Xy2C@zIs;IWh7|m7uS=l66(QRL6!R1-RnLSq^gY!pWdD356BJ#q6>tvZ` zk_nysaGd+jy{6oaUeG^v4vn^4BA)rUf+qTu(?H{)%yz#V=Vvhu#Wz*)giJl9Dy^XU z1*yE|%W}RnQp8?_HB;TKBn(P617DS3=;6gHV7`bBA3(a19f=O$ z&5mR?Q(E+VcpE*8J=A(2p7MS$-gcWKdNrUNr~cW_mueiN|NJ*WYT0G5dL{HZpQZ5L z|9o(%LlkwnO{R(eYPo4?%h>6WwJ?5gJpA|W1jZi^=EK4Q@YUZ{xb4R{O1u7=b++xs z&)2StZ)zDbo7EEh6vz4O-JyFh;M!QMi`Bw#jc8W(P*eQ;RTjBA1+wKWlH9q(iF|3v z+=}08_1GeZ;V{a%A08@&!40)(On>-nI3fH!lbLUM^+0{xTCxc5&wnfWs?oq_oiJiU zPVYuj>sDIzF&Juvv%uKJ>)77HG;+&9{I`}^)&$!E2H?8Gpum;)>iJ2&LVarrweYjoxsj_dQ{~t zi{}RS(e?kXLeJp>_+XqwjrC@1h3PYpdVdRR-fy53YkhX^^9!&TGz4D=+=x&YPx27h z@hRpoA-iE}Pz(K$_#hhcu9#F0IYP+C19(-yzuc{L#1)yv zbpB5_C@zq|hM8LQ_fd@KTumBPrN4&9&-Q|j-)M|ePk}dl6G*>5NvYZ=;dt+F(TR*~ z-~?w~>Y`zIvrB?K53I#!b4$6k9iBY+Yq6)!qhRu=axmXk&Xqsd%=Q1$q*qdzv^YH+ zCe86<9`<6f{gdldkU5v)EeEow4azt#wIz{EyI z7V`JOZDt++Qhgpt_3Wnq4Ewo(gWuASvLq@VW`?CULHtR$1rsmT@$L^3x$(WV z5V1&%j?r0swRkB^{q~O^&eg!lMGBNYE3l>{4G6j{}z^K_;no&0y- zfV4+_oc$RE+&jX8CE8Tc!ZK|>fkP zF+YrRKh2`GH3oFh9>spTgmrnGhU>yZkKA~XC zUXb5y2zi?Z!Sa+~`0rqxkP{e#^VJ^$U-pB)bYm!s%oqa?5;gFL=ppy@jZz}sR!-W z+X)x5gq#we&SIm7vX*hm!hGZoyq>EM<@$5j8R^vP`Zp9HGhYDujX z#+Z@$Q+!Rh3&f5pAitQ^F#XmI>ReomJ>Jf&bK^s9Rb~f;e;t8x{dFY2X+LuqG9GGF zoUy89ICD&SCwkr&!oLu|;jPp^!N0lMq;j8|D(9+1@HA2Q-;TnCHCf^iw|E?28P5t0 ztD!f>PuN4mKz8C_*k0lQj@H|S4!j{t=5n|b-lMqXJrQ8}eLtEyRoIT-DuwgCb1Di3 zw2*1SAe^u@5;lt_Fr^|fvwvPE6Zs{Z^ley@L3sRE=zDF4M(9R zbTUcFkHmljj^Ns%#y&~?gR`HeLgd4pv~YDW*qQx=WrOF59%Q)i9$O^XygU23(IGwX z%rA^w@`b+qw9~lP1eumf5w}D9kW1T{N&)8vau1JwfGQ0;Zk$IFq&zgigU?gR#ce7c znllO3b8%u>vx89mNEv_Ju7fCPDQrJl0p|iovQV4xuuaH=KG}X1E+%Ku$jU)n`paE> zP)R!#`I@7iS~ES_@M=A`xt$2Eu82}AGmf`4RXQ+Df7xac&!oJ6s4zK{g}t!nUwRb4`m#(o;IRqx z*D2wVK~H$wEFJ7WJCCW`HE}gp?O6Dy&CGt^K-`(FBC=Ol#NDekW+svwNI&$Dz`@)n z{_<^i#nr4*`r}}N50jrkS-u6%9TP=4DwQo&8Hq?6+!l5CLXvDD&E__dw=;ik( zU^%h?GIesqHmPq(^_VgI{4IEG4qo6}-h{Ef=$GU&b0Q7Y-3MM1qCs!xB=EW;O;IU{ z9Q_+kwnE;|_>&Z+NDrXvP|Q_-6ZT{Vx5)VL8#3rVg=-XsV8}of+*h@i%36JdE_Epl zJ`&CJ<8F!@f2`o%F76TL#@5X3ZallS^cq~%aK@ZJ#r%~1D4HN;0?v1XP@>|j$U*)d znEtJ#nPFnGv~@uKi#iJH2-dP76HXKx@ zVL5c#aGUEpc^)@5`Qy)R2QbK92R7bFKu;8U;7TuePxrz6p9hQC{L2#XJ-~}|xH%SX z?vKZA=MB7(&MW@k#}O#-uXv|(GjOf3ExU2w5hY#Dl6h?nHZROb_28Q## z)_b^B;wv=f^$996PR29A5xiXuhsr2;6xSu;H;Y)d@;zg#mn~x(E??p~seJNkd&%#t z-Omh!?CF?+^*B^%nJ8cAgt;ouVlDa?NG5UtYfGUl4z@Y5&RE7;XateQNt@?&$4n5p6XA-<69V?=)Q%5UhJXyRgWR{jNnRo zdWYPetiWj!x~yl&OrG;i7P%{$(5?R_q33!Ja-64*hQ`sfYw;&8aq|s|87JgTHZMcX z_-s1ZBkTo7rGT7~7Y5tN;-%Gc6tQGJ+I{wiF6Zg&2~^q~crX*peQGE$YA*9wYR1JD z9H6t=3UEPBn+0hABo&*XQ`lUz^qGneo_W(C$3^0n6}^0R(0;D+;c$pvE<@{6r6FsI z6$A_3-$e`Mu%bPJ)0i#KOlxn^6K@@ux@Rj?`x-->tQ|E3X=BCpOQh=E&1vuZz&aa@ znf(JdjCB6WN2ac$g63_uxt#{|`+zFRkCbOlDUwXV;}@r{V*-!g{-Uv+5wIgCgO*I%sQxE_coamJPk;$JLI!t97v3#C&#!qhOYr{~0^DDYZW@O` zW&LUP4ME_gGHNtPFZ^tE7q<>{Mns(2Z! 
zRZoR6V={5l;<0qD(Gfa3q|v_&z_4Ti+K;uiRZ5!+>B4-oA|a7g=tkqT_Co&7)-sgS zu|R%bJ6=0khDG}qu|qKzs950I=$?vV-iPGaik4nmQ#}VfJ^3Jv2$n&Gy2ZF+rVcEU zx((EFluvEXgt6bZQCjmDRQ{L3j^9_IkJ;yNFdM@by?(<>&Gdyk&Ic*WuLY#d@>#o; z2# zBOymLu}u6`yd9Eqw_vug*Sx421L@-(ggblzRt(apbf1$3lkeTNofmR|t?d@_|GOly ztBS`CMLm4DERaQ98^I=xjmE3D%$b_Izybbl4EDZ)x8>A#GLZFvwvmU}7R})xpDSef z7a!)Y%~QbV+KJrW)ibelzP{LUm^rVT{t3Cm!R*U|eK6{>KRPw&lLtEE{62XMy0}ji zWNyu*f@ZS;1b`jt|sE4^=H(`PC9% z={vJkUL#nsV=_NPV@5Z@W@hLgk7>E&u4Ix{AY9N%Zq z*vFwDv3NXgj~&XAf>L2Bz2P%$g&kqA9BlNG=f`b+442&WSc}J5f*Y&Yth!xn`F>xr zC`VQ;XN!O3Q>i3f5NrPyx*LK!N^#{Qa+$6GA3grS@XHx+E`K2AEEtc4Z(ow-YEAay zdN}#)aKK)jD5j?sOSOLKB17Zh%zEZaoEtp?r=EPmgZ&y?@0kbK;?{M%ThbSLz1hurUkO$f9#PIx8I{wb_I4 zaf<{E8DuTm7QUb5PT$WJ7s<0a+ymkd6WJ`wI#}jw!1hkMMhiSXi1)2k!kr)7F=Fdu z+r5XS@yl3SQnK8^4coq(PQ3M}Gq!iYC!rBEkJrJCL#D8D!6AG;dO6;5_JPAb_r?2% zjpf}sn>e+;ryOfnWqGrl@n&8cU0&|TG&j!1iXZD(y{8wyecVsFsk0ecrE2I?^-i|1 ze;lc~EJf8K0~oaMGc1dlOrB>1j!C^Wi+eE+6(cR7H%ptfxWtiPr6)gDcNf04lw7BbnT=pZuZC3K(-(Q^0wASC^P$PzW(6OE0dvzj9FpHtt-*1sd*s4$khH8rzpMkx~S{ zSZ7)hwi}*jGalWc89(CKt5cdNCT;Hg<_v1x)QuyLRZ*S&FIM|L2A7o&!+iaGIQX%f zZc3}6E_aq>ol|M#!3*rr<*Sq@us5#3>uVY$y&Zu9lM^iQRhjVwHlF&>+# zOl}v*te3#`<5WarEyoBf4{i29zY+2c3{gL62)NciWT6S^r1YW;#vP95s=j71hb!m# z?@XQD-!YVhOxa6=JEyTfle74wf7?;cjR(nFQNo=0F#LStM+zH$aC2u#;c|adly34P zqlR1DWxZ&Y<7!FDPm5{N^T~KvH636_G#O?^GP#-ZP`zpbuIj3UwfXL>ao7QzApesN z&(>fXCk4)JurZpw-hiP8wWc1ebYw@XT)24_!Td&D#!LBou&P@pA;S0~@H3Y%Enijk zn$LoL!za{jg1Ev`DW~cyF{|nDWWfes!Z~05Mhgip)$FTCOGqZeC2z|bz$nRPT zNqv1pS8~fiwrLrfoIE7v)ECi+W)G&f#)-P`MTs((whAu3ESk9I1FX9mN@kB7@oxV# zl->9YG7iPi*RZ|JYQT5uDD)LwolT(ZIh%QOHo*fm1+=_jPLKQp(D#@I7CripdZLcP z=gY-RRjz=D5%;dMf-K0fsa!$#b$K#cXf5~?#Cde<&c9>N<##85%3(2;xd~bLP6hk z@sivb>_l`Wow~gk<`0~YFKvRvf8S4mJ$c%IfyI&K^#MlT^Fg`p zNp#im3MlmCd-HkQOZecykYJN>~60~tm^))`J$DDnCwU}*!6l!nz zC%7qVY?XQ~VNGfZb3cQ$M>Q8K`>m;C=Tt~txEJpzRNy14R2F*VDcJqU!EFN9W7ga8 zcrvj`n86*#jYdQtdu=e&^cf$i|BsV#tLGYrOo5slH+1JtF`b%2SpKJ%+5+0x<(klpNi?vy&qeISaEr_+zuc zt@Is;H#UFc;xw&T&o>^yOnXhA-6aM(H;uhn85CIr|`WIclh>+s&rZ4Kg~VU0f#4k=hs+{ zM6J#r6#Oa`Zt{(s&dn_5J%1vO>@8<2&s4H0_fN3e)w@APaXaH?XrS|Db5x7o3IFsm zVCTtK@cyR)^Rk;JFjv1oxMMs#JFtbNJlVx|&#`0<=da?oc{7<(W)#*bR8dt#6pfNy zLw`T$V*c+KVEayi*U~Sr)AuP{SDHiZ4^ObDb#i#^bt&cDD<%E)(qML9BrrhU!PVGu z`uL!Tmma?f*O=YsABFYP!vn?ePw;tp1>R(GMpj&()^V<4V;p5JmB*2jgxSl?WO{r} z_|B?^a|`cPqke)dek(MtL9J|PryMs zGI(_Ve0uAZMs{@v*w_8Jbj`U0x@ud%f6jfbD(VJ~^*sW47qVgZ9&6MXuo#-Xzv0}y zi$VRZ5$+u)Pn&CsDPH-$c#_6ISp53}U$&+fLMuK|YsM(b=pKxr8WXtOB^&wcOW)yF zy{+79JxMfZ$)$(f0d8jG3G@sxpxF-QG&J!)7_6g4dwv-+nq$ii7VbSGjpW!u#Y^HI zlSG&o-w5KSLOM`czybumbf|O{B)B!w(wW`hb8DPWg)hDyc`y;6#Y#}Yw zmu4CJN3mG#v*kz1SBO*3zbA#mv2gfiIEEK$)A7Z7VS!=_d*2g4k|zbfLCzrFJAIsx z_qf6B(oTWRa&g>@t%ew}QX6&(KF#uQEllhgO!b$mNM*hecWh@MsC*j@W4e+=;%y{g;6_;&KQk_@vRczQyo2R)wz3BXIvEq7*yxiWU8`tW-J?ex1%Co6p6D=~AE!yYl?5|V zr)dcso7G#I4_t=RZ@xfjNIf+j-vU|pw8S6N+u&1m7k2ROiygc#@S67#*y8KJ4i&3N zuYY%r|D!JLMMtAon<9kwZxiQbb>iO_j`J63zHZ?XK-XN}f%~6-v{Cj5Iz$Y^`IS~2 z=c&d=PXvO${|<_5I)=8%=csj9ES?{|KsH2SDtUbmXY+^+a5?^w@Fa}r)uC`)czvx< zHo6~o7`<42&QJ%O#+(<<#8l8g1AqL}-kqxgW^i36SG>Ep2JWvNf|uvAC zH*uX{J9{9j6o%sRz8?W+{GsW0rM=$rxuj{6%5EzEWZ6!2!u0N%=x^4M4adpQBr_Q2 zH<|LN8HZr3)aCs4%!w=`9p#1Ja^Ul-GRXOy124uGgU-QTxcHbAsj+?+(^aVhBVdMS63W-cQZcZH*iM)2n`&5(3ho6H_mOH0(9>35>=bwMgA*j(o)4DsrwYgs(ZzIndu9w506cfqA=NaLP*OikHct9&?1+ z%Hwfdj2YeW*Tf}VKZvJpy7I)wTEgIy^?agVB^3|4EV+jDxyr9WtWI&IOgpBz@oat_;qq9t1LMQ z+3!kV&X?Q#bH)sg{yvxcUbT>Xek-a@e3Y1&4>yt3vRKd`x*678TY_#9XYo_r4lECd zWj*aU>iyf6%yRXFLFzFyHvc$^tIDDD=`MCz#&G{kUtWH#oSW8kl|Q!b;^IB-J*p1B zg15@FF)8&U&BUpAZ?Op~D-B?a>qofz$?i}ReE}4~@q;8$haGWsu9_W2{ 
zmF_vQY-ckKldkLYrNuX-JZd`nSM|a!Vdj{4=qo>-d|Z_MGUuQHBRJyf6LILHJbEop zrMg~a3YAI%;UzSEssZ>Pb)#J6qvWt+5hT63Bpm*d zBMu#)g>%;E!npAUwD4PrIQroRA^5=q2>p{UjCy2(uOIFty^&L4!Tp2O{xTE7mn4b3 zeY>NT#Aft(@f+TL)esaOrhuQ7GF~)ZPj32(VES{6vx-KNuxph&-Prj94%NRC^<5{4 z(Z7nw*1(MX15G(BzY||Qv4s|zDZurrLlhCP8+G3GZ!$ zXnCsGDXKs|RN_;tnXWIG>^((q+ouUxznuBsvK83UcwVk`@(1Ngd%?=~0qAvE6y&|q zsA9@yP#ZG`F4~yDi;;=2a{f#B?0FLcg3RGZ7lJRf8+q6~5ALO|#D6BNq5V&u(CSCI zIN*l#U5wZ(?&$nb{BKf*X!3Fac&wIS&}z9f(7 zb&<|L8X#`)Plx-4%9vI<6@Ikr#$Tyb6g0`1gTmjFcg;DPR~#(wbuJdWt*YdJe2FQs zYAE_m&kz?DAB6+^8f4FZl<|wrM`_FLP}F#1fvZg=53cTJJbWgL{tFIqHXj;Exn>vP z_TeP7r{nl-$1a|+*cP?BjKWFlB!1_b7Wud0M&Xn+0|`1enhlK8pylC8DLa%d2$S@= za+-iWyb^>F2FtN+v?k9|U&n&ZT%2U3fJ=gA;`u2DA<4y;t0hnJv*{&d{Bo1v^fHTQ zHK&qunGzaFK(VE#47sA*3m57<6kWB$sfV5pSIY8WTU3nTjD6YZbdI2#(igJYqxrny zU1|2_28~Ou)8v{Dv^1(k>@@Qr8J&!z@uj-hcdn9{aaVc<3|7F$=^tQPk011_eGi@) z{~rF1$cB!;_rkbTb6DY|;G!^Ine)43g3x~sJT%(^FK-3Ix5ff^`A_oQJY5Q-b#=JB zB!tW3El@cmUsyJ}7M}Rt#iuKhFlXN>idm<}lR{Pa-f~5FV4EV^sGh>?;di0?nnLI6 z4_?y3CniGewI)!caw>hIhhbGSaqx*k-Z=Fv7EX^5PD*!_1a*NwrY{2ZOWN2xa}b|h zZ-k1bUtm`CCTuE50-puDV7Z=_5LD-j$8U`h9VDheKMyBia84?uJn1P2#;?ij#Z9rq zu|iOL{TV)GDRIG^6EJPQ3b|!H61vt8;ZZgkn03A*?8__zud!Q2TkVBxGAovXGi%}A z_P?~k{~g>JI#kFl-Ve0;Eey}T4cprd`Hq7Oi|-|gj;}2+`m7QTxMwME`&JD-&GrcY z9F1Vt`c}G88ZYcfGev#5377A$5LK>ECi993;2b>(EmZq)L&!LeIMtm`znMzk_zX>b zy_=?~U4i^_w?QrUHN6}>10`)Yz45cc9lz~_5faq7(8?|p@jYLRzqt&qe^aBMxI`$K)JjEBBY2jl zKfco}6Vn^-Q)&<<+ru{8+q)2NybEU|@hpzPQtT1y zh(mrK;e85kA)-SLZN4N5HC9#lt6>{gtO>z7<8%C~i!Lusp9G&r^+A`&eYEX?4KM46 zvXM3^l(sTS9`pDpZalP%Mu(RpY*;O^rJq8A0dSdJ7ks)r3+q0t=af=QXgyTMdag6! z(w)($Ze0t>)2G1cigVE8_CD}2_u~QjmwEQ37Exiv1-i4w17D43g%dt{r-#mK6BMV~ z^RL!JG`WYAWw>oYQzFh`iH8+$us5dAoGMBkxd$REg6P-W9HHCosl3eB0rq><3xPQu zT%M~Mv#HH7h-Y>cUMh#n_dzV0T+p`5ITmce#Nqd$- zy*coU37={?kEzGv*n5F8DcsSN+~`wqPTC&P-ZP0XSC^m6%faj}x5)SPF@Ceb7fgan z$*sVFO#EAE%6KK39xG*Ze@>=sdJ0@MM^CD2oYz}S?>5$ z7?Rrn!&_!wL8{cDdC_SIBhN!FK8SU#$8E z-YqldWznXTx?72+8f*lemd&!Z@@%ku_H9+QlX>9!P@w0PH(mP8pX5bA*-(Z_SIMA4Brk zWS2W;rf$HaC0gRC;(^drw;j~hn3C0%Lf#pc$_WSO;QoKgTwTx^lzUs@_q8YJGL_b{cw}8$?GZpDD4lW5fwt?k+snB@~nae8qmBJx1~k1=8+tTI2c8~4p;F(z9xUa^p)1XkO`hrA4F^5TI_l`0qL$k ze=~bT{kud7I*D53+D#2#yi9=ngX%mbD+C>*dg0t3F?h*lq;!6Vg5$g`*!J#F)vlJ~ zVvS`ym#2H+zcIR8edR55pC+*=M-}pbA)n!Xv@(k(i}0i5FFCU~A7)WKZE2k?C`)|r zD^?D8qk0Gj9vFwU8(ve#wg|30v=8!}vY~74T&B+^f`dZ5@JB5X=YM1=S6@ecB;HbE z>mUp|u#8(5hG0R|AHLg`jotP>6CE_Kiguol;BGf1_DFC;$8DW>;9qCZaxUZ+!zPHE zJ&}jDyJ3RO5->HAp7CnkagWPZu}k(Fni0?kOX|LYPkkE|w?)$WAr;ia<~`Lqh4G9U zHS+7zMX33(2qRqA!iN{0JZ=e)@}?O0vR{!y8Xr>ToOK*v@=LI6*~~3u&8qpia8md! 
zNK{*4-{9zQQb#LX&suOo+e=6%VH@YDzm`)T41lP?su zHJ^j0<K}uVff0U-{7*=T zt;eHJEn(}Fn=r<9EcO{bPWEACB&;=5X2ofd@ZRwqylE}MrN$08=Hn4C8<9XKJ}nmO zN>*XYh+1&=?8kT42V>syNIa#a2@{H3xNY?|S#i4&HXZW7Pk+ilDSR(4U$4rpAI7uq zFD8Fi2Nc%s<~6q(XkK~(X~xZi&0jqEa7Q&v`_+L$KMsZyDdzz0NCbbQ8=SZ<14F!Z zG4FaZV#gxUDkO(rNO_-YX@lrhR3d*~S_rDgcX6QPixrJm($<&%=wkVM@#bWAatJ?4 zUT*EM;pHZBUiFnq-VbDXdL=o9`SJtfalC$12#IU#L5t;iPC70R?bQTZ)fisien(8UJppIadx@h0 zEgSk>a3a~OmB9OFF;2j z-Tj?$s@+)rHm4W-^}R~FuJqwQyI-Tp&%dJDlyUs@Z-WpxaVmdvC}Qiss{DHEL_B*& z28S<=$D$z(@`b5ZJT}LUV~c+9jEowJGwTlB#w6fDzy7i*E$aL!S5>a3F2`P8+0<{_ z4PHAV8DVWAdhIHrT}E%*p@atv_jK7pW8RJco}r`$(VXAnN9+g$7$JFsxTJ-}`zR@-2!Xcklzq3D1Je z87bn1sfrl7ZaRNhktXZiZ6-ur`AOG)HB+JNpcs>GkBKvM#bAS6aZd3Up;yg&apx9O zHY~pbadJ2OoiLw9MYO}5;e$DUoB=B?IzqjsZ@}KCCHJcK7OBg>1Y(!nBA+x(yq$bZ z+}M94@hc5^;UjCzY>N>dj8~`{Mw_U&uLy5Ge}r4g*;3bF}O^*!Bo^^FK_7f3B3WxDyaC3t6nLJt|{q8_$>b)Bqmn(_y+`mD<{uH|NXe-rU4HG_Dj}f;By)oT% ztKfSKsj_(%T-g)=P1mEqyGnzmJyPLMPxSaXJ%9?kG(4gFMR2Si$mh2OV`KMel%bR- z|IvR0J^ZL8T(8@NuWTyB=bIkU=d=k>+U*`(E!)D|(@qEltq16(!9*wwvqR-A8r)1j zh36@+!8_R&Rc_dVR+mj&r~eb_=GR2h2x!h+=g}m}s4gEcQ4r}Dv zcuB<(H&3+VPe$JSV4fdtI+{qThmwVy-OA8u_#E74+Z#8IvX=!71NN0XR~4OO`C{z_ z*j+Kd>e$nAVQJn=SUD8lVkEk+N~JwJgV5-26_x}C zW8~L#`mDLW@@=3p6b={-DH(^jVa*J2dVm6FHwRD;!vx-L)tUS*nZfT{G3fQLmi8=I zgv&3+;eSUXX!+tbI9$I-)LuOeADr8RO;^v+bEO7Idf`mY=QQbX!*a0Aa^D!dsn1dCL< zlA6tM)H5aW9F z#lBZX!yq$mJvT}|E55Wm|L6~B(BZ3C0UANGM<_$~ldA7#lF9dyEPi6%Iqx&>yZo3N_tXQ&`IzNudU ziOF%|VU=u_{nf<4E}?k;QK@{|&B3TpxC?g}Eu$5VW_;?747Zo*^4~AdNzChl?e8Kv z&wL*|=>HSistl?4_Q9v2%4!0FsT+WzVCLb7?=#xgZBuR1N`vbV~MFz zb_FhrIiweMgMx~-N#1M~T%C6foQpPMX`U4PwEQievEEGzQ&P}3*8t@W>*?vnAiQ93 zl%`}=l1HajnTeB+SUmfr`1r|V8hGvu1sxiIPYY5w*PvWH=IIL-rZ4EN;Raf9&PPyv zGZu~RxnocaH?cr8j=!-qOdkqp+KewkXs>5N(jcuHA z$PZVAo`BN*7liL`YUSB~^yOfeO@>yMc#bmaLRy5U|$27e-<<=Hb~ZGH#Tb;tp8V;j8rPC#pOC33&}0^GjdrVBxV znEf`LZuA%pVG0&p-Rgx0>Ml|D1>-1n^j|92*i5<&3Fshos(QJ+Cfl4V_~G?SacjX5 zuo_w#|j`*@n=qXNL5iDsdoP%7uwr3;EhqMOK`%ik|db zB5SEX#1H%W3E@f4Bp-Aa_OZyI+LuSB{X3v0Hv zc#7s59C7L;6MW@5nosuc$5wW;s3iOkjYzysGrT9lh@PGBb@NmTbrxv;n@lQK$d@H( zT2c4yZR9+6FsoEXaciYA?LE%)Le}wg?u22uVpTUbx$437A(}8aS;|=GoDfPce1a*8 zOW1IV8J8dR74}Oki$tf+od33g^k;k*etcCz)eZx(%h@C$;O$e{r`JRH^Z3Pl@^XrE z(X_YVGN=d!{<#Nn!34Q;e1!22#{*Ry6Mo(eg!>Ej;cIF4bbGwI&^0McxbjXPnm@f2 z##s%-ecjY07Fh&(6+9H~W(-5KwVSYGb0gY~O6FV+q#Jd0V(0Ib!jMj@G2@7odm4U= zhDbB3qGYM_96cZHMvjMi3xBvMe1otLKEfHW;$`;5@&~W7gznbW!jEp>s&=Z?lTOS- z&?#PonKugP=dWH^F*=oU^e(~DBYlJ&c0*39+}zLeVvNzXECW-z>Ix7b^+5_tx#4>Z zk8+jzOVog81rNs9j&$m=v@%G31C}rQgO1@*j*s%flN~$ZqSO8HYLYV=^|X=Ro3u|4 zQa9oLla}GV>_fac*ATU1?n0fj2i0Hqgy`W)RCn5t>$(KfhuI#e`28dtPcDaA15F4@ zw1aA66`uRc6U%%hpVjLJV30LNexO?=C9C!2!_P%9OVlHy2U)P{W}5Rfk08i!wdY;W zYGM9SN9VvcMKpPO2^-xP^9+p)4v#1R%XjO=V6{p}w$0Cf`u2)dX ziO1sY&p~|aZC4y)Y*M9L*+XcuGZq*1ZKXR-ho~UA3)hvk3Qw)i46@n7J3)==_v*|L$GsLqE058oe=I0ajVXTcR_t|Jm!})0iE@_$9IX^DPP%nk z)K;5GLlr_XZ@Q<`^|&ilo(iWWhkYs8=x>gKgIWE?eCC6a4|7mYD12wc+e60+EwBbwJy+lzYZmjIIhnY#K@E1q z9G0iVrE%M*o6uqvgR|AH(9Py?{Ho!oP^ET>9!oSEeVcmutDhdUs$?*Eg$B}1<6+{t z|57k<%mVo3z8ZhbOyEp(grx~HVEmLicye9ZIe4XrRRIR{R@&8hJ#iQFLtXe*`&`aD zFM#Hn9}qpMkWcmB!uQG)#p+)#M74V_;q4L=cKmq&KW*%TPMSwqIe90pe42_;GuvQZ z#!2x)qjV?I>P0*L4#8pbuamuAG2H6X6(2s274%iLd68KyOkEMhVKuQ7e{n61-{6N! 
zxztoonklGIu17Q2==j6a^o@M!kBmd{`U(8K%^ZrRHL~+oE!z2D2?vZb#hnh)9M$6# zdZ{PkfW0$?q|XIX_gV&*Y@CX7UT(tSe$H5KDTA+SRCv+pc38L~mHV75=8k&>@CPC| z`qWM^TKW^}Jz^=}ZZ9^C*X5yEdyqP3;EL6IF=e4O547CD6UJ-8V%gpf-Ro2FuZ%9c zMMa}-jV|9CqJ&TQ9EW{5Le9$esOOZ%RTU37=J+rSKDnE1PhEgfeW&5P1H-UE>o!~s z>q{m_=E1IyVxoZ&PH9b{!i6T(bSDiLP8`P$*YV~IQ8ya46SMs zB0r~y{b%jO(ryoMSC7MRzwZ?ItriZ$)iQXeUx{BU3J40~4@vME}47{Gaf+Ou`(X$;{_~Q5jTsOQ4ePlae zg|zS4o+EXOPG(SGwie%xYUCS!0XVbfBlS*u3-SG0;l=qpxR@-JmB;K7FFbfn?*{j) z8PzxsmY8jX@%E|s%Qu;4{Y!%6o1^*DeU+Mj46LJQR|~mo5asgTKdEZbcW~%hgb_aRIuWzltk|x5FKy0lcNW8!k&K;aR(EQFi)A z;>Yp4eW@%67AtVzWi!DvG>TuZR%DfxUeI#k1eSO0N!N0U>G0F1G}K@-`cLdipUV%~ zzfJSOAxFb$((w?y;!9+)w*aruSsIwF%+V{P9j98kctS0VOm!!q>EKZ6Q57gYs{cci z^INfPdjOu?7>w?+QEYHp6=$xqqAWi}&RyVv4Z}`zv=~U6UWVHjZFb|YUDndqFFJfR z<}9pRY|G7BXK1Zv51uttkyjpb2cO0|F=TEYJ}+y8$dG09)UuAZZ9ae+7xnCxsF_O{ z^A(hR+fABx539cXMOElBHVnr;E8zl}YjDczJ}#K3#nzC3)*FBBw#5^C8+5`e>125D;dX=xORgm;BDyKaSMp&TsbO zW9>_j)*>-Sm8QTjsUKJLIEALwSo4SITxgeixp`jO=)GDzB`z7vr<7-Ni%l=IO};Kh zYlI7@@Gx}--ToT z4C1`~0l3*FmTmf7p@8Jc)e$~Vg|$t?@Z1i`Z`5NiEDs*UL6%NX>2?5iea{d+o8E%Q z<>$p2?)%`tbVVMM_y+4!eEHp`Q`Oena_El47Kv>+HA7#sa`Me}$GPdiV zgjH{(jNecVp{vPqzC8W7G=oedw>7prwaSqCrEP~kyZWOJ`|!EQa8j9YR0w?=BM5O% zATYrZo=AJTZ|Ao2JN0vDYG6k;#)pO5|2}|xx58@A9ms!!#*nwxMp_w?%*r)|^nFJ$ z+sO`upK;!3baD|n3<`sh){0nsdI>3ja8pF1nsY+#oU69~$20lv`)S z?2JKt`f0Hg_A04<*m;zaW{l=(Q>$T!?mW2j%a^KWt;Es|12}%`7*=jp#IQJB9voB7 z4F|Sk)X3YgTy+SplYdNmlTOjFxwUZUjxla-wgsD?PH@Rg3C{$Fk@<=o3_I@3x(Q)C zdyvF}x~WbpT%!4S%MMhnJ|z0)T&;G~E5fL^ilX80$=JBJigsJ1z>Dshe8;NrjH~Kq zzTM>^?e+}AOLrs=scSlFUXpTI2|w)@iYsx$`OQL~3A0#pR1*5s#lgy1dgwnp3wJN< ziZjo}u}_?owVHhpYd)w^zoSR0)>-xsD>*X<@+2A!lZv7tZ@wP?y$^WzuhjAGu~=Lry#x1cR$-6S{>&wRVP4l#oE?-b zzTB~$)?7RxXdeG5+?qWEBR2R8zi&T)_jbW>Wq1uZ%#Wiw(^aBM59tOmFomZntj8*o zSlZ>`fE|_&sJCM}E()7N@5Fk}oTz1|C*%vWn=DDqUk5K~=W@f*db;O!5%MJ#PeZYy z!~8q@xqj**2;KLE)XS3a(~a)%C%qr$S6Fecx!XDBcL*rXnSgB-8Tiav%3aLsh08tF zcv6)K$Mr10wXJI0efB$+xuC_b?v3H0065Eik}z^p3Eua-M0ynwU}U`%3cSZaVp9S< z>L{+ZEq@IkVx-Px&IeK&K12w-J%GEm1TrjEb%^%-!rf}zu)Hu?%>CL8Ps~h$$Zywp zmtnl1Wgm;@2kGG>&liIBbRF(ky$&M-??RqNI1U`z0Ha42Vwbr-G+SmCf4rcBe-9XQ ztVOKw$`x3;zC6 zhhlOxw7e}46dUH@)+T+9NiX3gH-c&9%~+g%G#wSnT%}H%N%id_b$ZsioP*sg!L;O` z`1aTYw9r3-!+s=6cY?uux1|f!4>pJ2;{+Jc5(O74OK_Ns9JC~)@YiL5H0{?o8d@#Q zZ>Cqm%D5tIUpo$x zraWliE35N_v{EA+<}S-oJ`JRu+68|Y9ED~Xfj2j;g7QHze57zUTv3p9I2zFcRr9;> z+JXxEO(!zY)bBp5ZH|GW?lGu)#~G6p^8}kZgau}Gs4#m2vxd|KR_N0sIK6 zp}KKt;)d`!@HP`@{I73>ZRs3gt01|JTu@Ap8=TW%A@#ns$lim5ksFil2cwb?6>`o3gcyKY~MYFNYM|b?Kut;o6e!9 zTDItGG!5-GDv}Hi$6>M4AU9_)_W9TZdJ7g~N$miZtyI9xhDUI@*-F|Gg zfsphx4~iTfU`f#;!Mk}EXatEcV^btPF29DKhdANg6|Q*TS_n=aJdmRU-iQVXNf`fg zE$z#{h`NQ#Sglw0n!jrXVywIs#?_Y7_(FTSv2mTa`JycExu=6aYb;@n(RkrXc6V;H zk-oF`i}3vZ=VC#AAG+`UlNPfYc}FSojqqY@=&sD$=L~kJ*|Q5Cgr%Uqif!frhzbz}~bHjJglOHxhSjQvO=Z7^_CD6JNrAy?UWIEDBPFbmd20opjGV z!#-kantgiIRdMgAl{ou`CoPb-=EGC`IBrxQ3UR$cFOr&QYHKxX>%OE3DMpwscbl$s z{6{a$il}q#WHhb|=il(0Vu!^-x=k@`{C*I>+v<|+=wrNV8dKiYJq#0M@rJ4oFR&~S zXPoiG`u&yMb9om&-;yIte>{@)I=a&v69w_Z4ZW&khjkswmgd9D;37&$a^WfC!Z~|M z7KA4sgq`NuaClz~uAi;Wo0J}dx4kxeeDB98#X;0V%CEitnaVE?m{Vh7U#@drPp7;R zaY&JrDd`=D7gk<^wYuS={T4?s30{CYYH^?@^q{@E3V5!M5pErki1ow9;G+?taC@h^ zXl5gMa$RZm)zEw8X&yFi2k%)g zF%r9b^0;1Sz;}^7HqT3i@xeBDKxaQ%$(lGU9yt_>0%O_BEuZeJPl4vAJ-Nfu5$?>h z6OX_3=lGN%xb0{>oVeZ>bAOlf#s_)y?bB6So8JeNp7!IFv$w#+PpWLTyB0o$7~!^c z#{AZ+1XlzPq$h3lcGs4hU{1|@c)s`;+$ohK>zU?fEFd^lLTlD0&7a z7xm~%$yg5a?*U)@3ZX((;!0Ipq#CE)M5!o<_O~t6@s zT_G-{4C2q<6av>RgPajD9PaZCwPu+iq(ostvKg8eH-qh5N1naO4co$BKta$p+LD_A ztNhEjY^FDV-+YD!El?BNTyyNlJm|*CGOpZ|m;#mWqF}(Ut9Zi18Fd4_NKW3HJbKL& 
zH_r?adtN-tqpw}(I2&Y-gf_U`$Db!`T`$gjrOf-|19{x%6}WiGSJC>%TfwR_k7Fy8 zap{d6cHxcH>K>cvlzCcjWY~1YRaoNLcFF6s&5^R){+yz{$Cbcw|ye{ z*#ENpRBO)6lX&SC@8CmoOZ9Q@C~T5pDN`wr9cw4^v$f`=9+-x?dNI}8pX?IfO=bzz`ZdH;@P~3X3vuDzKWh0vvn$OR z`x7ccvnfm{=h&SKcrd8J=9zP=j($$%MO~1ZOr(Fuio+aw{5;q#iN%|fZTQPyD;_eW z3+C?rK@X+#uCqOWQdhTQMpiu-U5{mdGX-*;(+mba{dvoX|L|Z%t%{Q)6wqpr7)-B;f zmwRL4Mag?FWf4xs?t)Er4tV;SI)9t1x-C>m)=dybYSSeeqJrel>XK05UXF-ngouUN@Xa?=F$ zSQ%X0rymAsP2+?)h<`@kroT>`Sbubb*gf_$PJUWLT}&ooYMDSmSCBgz-_ewPvrx#i zW9O19cx2FG%s+RKzZbOAuk|9Sz08M_?U$?8r&nN>=_p>lJc4C@%Jbr}?&yATAVz%% zr)j2-A#G*?F1CkG4bwi6d_1A4^}`PSI&Edwl(%vQs+Z!DgRRtDW++Wcx-rkD$V%0JTB z*g5oPy0gQpGw<*MNAi@-h{tzi(cW<*L<-su|8!r9?*EMO@9L#IcUTL_O*zD$2ONW^ za(i&b*=_WR-eZaMezEcyLMso%Q?JAu)kBt;;g1dj`Z@juSF|M1aE+~YZe5On!f=!7 z#U|4Irb7kuZYDywbsS}uI*YZBcZ*hOIxzC156>JH!9$J4vW8O#O&BzlH(ZXa4wX6* zc8SAq)<@}`pO!2sqFaR{n@uRs;Y;Gm7W_>un;FwUL~+QYZdIQ5x) z`m_d|P5D9l$LZ3qQMH1}smZiN$_VaB9)*Ka2vd*wp~)e6l>aq`*Jl5zp5Cz>-Km7z zU%wNlpE}PDo3`SO#6|4&{U)mZ*28gC?N~$C$aO|38|Oud`!+U0`@`MPULH>;pG~Cq z9Rne8MiKl15sI5{bM5E^(P{Wu!TL!I^g3Y8H#WToohB=uk@t*OWi90NNnhZr$~W)3qo-H{-=VN z8Osx81~|-`C+*1(9K(i@XQg~`zL0Rsi2Ux^vE9xoRhp99^ed;K^Fd>x3*F#npKcEI z9|epvT83J0*I`BJNnYo=TWmkGm<3foFbK(oU(40#gw(Ti*0sY~Rt9LXJ^}|{+{9hN zobmDELdf1%BUDUXg!*@XOZPnxPS1IS%hsmCbkB2ezQqamPf>Gl=|2IZd*xA8csxzn z^bhXXkHFy*FQeMVQQQzcjW^WXpy?w^X@E_t{j8K1U^i%hL-XTYDp0WCREgPE>8wo! z^RHpVxAFX{rv>MoG#9ECKZW5xib-MLb(Ekyl<1Iw@4I%F_K0S@OFfoaD_2O&*1;V7 zu@&+>^(afNlB{lI;iT8M=x0TfsK2z6n|h38za7u0`<+)X`|BNoHWBwJSEJu$q@4>c>cPMs+8+Hoq%--w8$;%^Xy?`wS9xd7%4?51=)$gtn}Gjq|&; z)Amj^uI;W5C$iqcg&mo!qmsl!Puzu-m63R2!x@aIyM&3?pNnq&?f6z_0T~Z|z_EHM z@H6eL#Gf>-+S5%7C;FE1qVZdK>D>;ln5I-SV}l{rYV55Z*~^EVSA8U}yK?;M#~C=a zbRxc=9fp5rf5D`ZQc_l_6vQ1XFu+BV-`j>(hxzwJSqom zCd~Z(g6>sLBqH3hEC zljb7pufh3TJ9<<*N|-Os03A_*@0q=!e?DJn^G6q0nB__JirVx{J4yNuh2wuSZ;Puh zYT~VtS=ju^QQV~?b>L=BBcCU4=#s?N{L~yp$rmTs6?Z$0Zf}l>+v?^aY8(|uTye%b ztEHWd#7n%aa+#Ahe;^^p5jx7F=x9tMEw7EFDAOvKKej>qJ3>_$v{fC;jh{o_g#Pd^ z^BTtl=zy#^I4v?6|8WoTrtKumBWr}3nZ3N2qsxrqK}=#lN{@fvszDczZa&0 z=`V>znlOuFZY`tB(_~RP?yDXTdD! z@Mwt#E6egeR@l!%h4mDjarGT74EfxbzU-O|b;_q;V2R|7(a_|;plWE-=1W!O&a*LyEdthQj!mhL#gY8eiVeJ@P<^d1styMuL? zU>>O*f=lYcX;_g1+Jz?YuRd?dTviL+D{qSpSFG8@Pm7Q0@5Jf}!^w2dUbyt%C+@v& z181ykqL167NG>Z1>s*!j*0vBTxI7jwY?029eHPU6P!Y~w=zz+I1ZnT}L^y0$MqT!r zR(rgY_Q*L=wC|h=6dhej=X4KM2ToieI^U|Q&b+6>za>9~`@nd-K2Zz%|9CDu`tKDl zjMc;EnuVxrIuA_l{Q~uQLpgX!cT@%^+!6a-(2I=`^6zwaNcySGuT=8k(WXhC(!CN3tl}msLJbcvZ&R19C`<5y;CtqfleHuII8OzKi}$hl;sJ=W@=DG684@R zWZ&E6D1_NKVYlz@d~k}TaCqZrtpDIlJ9Wn4pi95N`Fa;rNKL01DK_X@s4DfHPuqJe zEu*O6<<#D>2JCyKz}XNr^v!F8D#J)}l=_WLl>_+pq)7HiKL`$S^LW$r0!lCG$p@cj ziM}fxgL(9$Nf>IJ z$-U$9*)A;${ub}!AR}XrcI{8j1;@a3eE>eYznn9_8F1e-bHVocT(QId^eZJcjNVMEjYwp5l6i`A~Z-2EXyN- zFmHGrem-%G&2qEwOZjckoBRhBoQa1nmh~VEaUdnx6zY{!Esje6je(1D;oH|Fc(rjp z{t0@|gTFVS&G|9t=px4x8?-_GVl$+in!y?K3P>$;Ee^|Wflt>@)AURaDK`*ff96y! 
zDLE`fvjAl*^H8Og-pBC#xxSo8W&E>Ug%f*7=V!1Jmrc%q{^C81>(~tM`dDIJ^CG^v zL*gKXzF@w<@S(O3>ExTz+2$Nr5G2dhvmQ|Ixi_mnjb4g5)>p)mw4>bh<~%N*zMdbo zB;)Skox-CRckJJ(wSvc=Q`BCQ#-UBO_?P73xE&<9Zg(!n`77hV{?HQgIerx9Py0&o z1$(&sS6}{Gmd%p_b2+%%S5RCeWdy%Jg}lX#ujaX-TEcvOw&V;P54DvTfVB|3EgRFf zeW!2trMzwGT=C$hIp`^6vSdwU>|_JnXk2I!^soOQZg?_`(!PYl=rwgP_UK3mb{@u4 z7FrAAv!`IXSsvf--4i3#Qs77KX14XTfP$&6Y~S|+E>0SQ(Stnk?Tp=Gz}A;Ea?@z6 z-l4?POa6!*nz!h=SOH5Go)+7!RC(@_0~8YL&o5_&inaHHxHc-04Mb;NF)sq#t)7tU z5?8XD`vms=(1UafRaSrM0f&C6^W2ntbZSW<`J8Z&uUbmU)0A<%^-P>T=OL>tFQ$9i zm!a#l_Y^wKs(OmV1N+)536td?kwvB!7yMP`j}v;-tZ6qC-|dNEl$nm|n*(T)sL9%Q zi}2oT3*P;AEEn9ASa6A1)IvmY=s*reDPWtFg)(}+_`%$gZ>36pZJ7$3>mJ3OT4P~q z^9`tJpM*W_=8}}1;)z+CaER9_kQq6Pk52qZlM0GBAw(PJ?pO-lRwq*Hf|=ZudJNX) z?W8Q33L$daR(i4|6g5&jxZSy1P0^wn+NqD2FffDduU^H1%RA`TtNYd4SM9*GgM)F} z%X-`T3|Gu88jQP5kK&J-eh~fXG_?h|^1QM^IH}kHuiaXVK@T0pVz0X(Rh*>%?~kc_ zaUMxPH_DB#qHE>gy41!))e=6IKI{Kgs%4S`w0^Uv3*=*3qG`1`=VBzh`_ay}f2OM&gf zN`xbm_X{^oc5(KjuJ}eS0(|Sok~R3E?*DUg$0WAO(@50SU5i=|r$JfSOX%n$ib_pg zSXlU+YP3s)s4@Gv({vmwE)U`4gBWTo9EBP2V|nCyQ`X-~bQbR}z*Umx`;#^~r2$9AxS&B|Xy%(DTblbe@$(;T=a% z=E4ss`6G}0Dp52{9FNzt?|_N!L^LVijygZmX{(`;kd?h3(;}9`?Yz~{D)BVjQWV&v zT1|9n96=>W!vp2Wr` zZtUjw2nPO;L1ncDDl^OALgzfX-gAs7C+mxumggZxe-w9dSVmPh)5MJ%my0Jy>=RD? z5um*9RSb@p$Y%?JXhXU&{4?!^uQjdkfny*{HrT>*thUm^Pg1gT6yOXy*u1hsC9O+ojuy>yJ}4Y>|309<>fZdX;RobZT+AFCeJt_FwR*X+9wdjJ;GZ(>&eI!Ze* zpQ?B+cOTIooZLpE4yB^-_LH2jdK91AokPPkTLsh0<8h@lvY0M8D%A7-3dTLpV`Z@mtsk(GBZ?Ym z8JKRp=GLLGwnNW!3w-nH;;uwsW(i8Wac0tSib8u=8BX(4jGBFAQYRu8Y zW4iZ*kiwC8D|v5ee8t$qvKZX=v>4pp zo%^j)=Ajj-(D+;vKNQ>t^`zgTqnkfxCjX`r^F8s}J~!?|3baJdme=@YaID1Yf1)^^ zwd&tf*PanLKI~$ZwKQWN8UKvZ0)K+?6+OIo+lS^So|8Q5Bk=anB9L1@nv3_VQ>^<1 znlb1*h40F=pBPdE3qQqDdZ9e8cyy4T+_l1?sq!^LI%?5)b2gma`;aX(E5tQlD#&EU zC-Re^Va>`4=p5ZudUtFBlpHZ<^F|Tmwb659HI5m6flqaPX|LqFoZSjGb4L7as&eeX zV;m*FNdMtHVYejB*L!3bA$dqETBwqw_h{+JinbS%? 
zdepG&xPd6MZaxgZKblAXeFye2c_2G?0_#3m!>f|Ub7_htMC;d6?_n3g>kzHtfc3f9A78nL4U{SWkm@MN`^J z6`{(wk{)Q}v#6j-KMWT0V`(gxCtG08Ut@W6Oc2Feysmx|0ideAUrZTkg^NE7r!ag-wF9Eud9(+ln2t&=jPzWsTxy5!EbOJ|FFJ;*ujDp=e*Lh=7@^N^d5 z_`{_l4!+n1p@uhT@;-!6M>V|lKH8r)Q9@wY2UfLx0FX)heK<4r1`6eb#ZO^`e)OOgBy**I4bLQ?Y~ z#IBy2dTi(X*P$Ti?t)#N%c*F_MJTJj4l7sp0{yT)tetS1t_O_9Nk&rt{Z4`p-1K z=n{{Vd(8i$ev)N;C&_=DfHO8cf&110{3`1zWFHv~g??v+ydO$j* zbqGH^^Q4Tka#0i$o)XncCyjK~_8Gl{*;lo$7Oei4@qX_`RWqF0oP7YRF ziq|~%3JVS&Y_Rc>dn%@}?6`PKLe*bQ`KJy;Bor=S#sRz+Hb2|Twl3cZ$Uc$O~ z9dt3$X2^SnUm67ddUhq8udB27&CA2H|1HF^`F3pXmxQ^wuc5L2Ieg`F)O}80==01O z|9UMUCCQ;`6*&m}_9seiS`SV=)Ej;dlodyt*`qu`TMRI`0Iq^ zHO%6H=cV73^uhRMUJ;btQ-+@ZN^!NUOpTVJAN{lK%Wdv47*+NVRvZYUgi+D_a*#6@ z^@$QP?@s~kVLrU)Xe}MOn@6MSuYmUKBeZtyPcdv{55D0U^9%!*_G_8+#hM?D#2IboRr7Jc+|T>JU6%Hw(7pmBHHQH*i4h34Zag7q%_3 zTbGUV#k>BSBFRZz&1znyUU`|vt;}p_aDeh&fkv2zHIZP9!|ZR$eTSz z3V}CEcu`ac2K?wvJpv11gS1P#(0&c}B=<*3NWd5Ic4WM|m=D*Mkx|iK`mQyAg5rCj z){j_r)%`(lPFm91!yvrAEoBMjhT(h9nUL7sowaQ`u)mWzFO6n#+{1jVoSH*o`B4ZQ zd5Qf#KC+*kl8CLBcHs$pM^OqoxWAVRC(MfE%wu~|?Jz*W{h_!{e;aF_s1P-#Z^iOi zXCQgZB@FZFjgjVC=!P_p?R9G(d97*_#?QLT-y3?1N*mYU?diR#r{oX_R%sI>!+r~4 zd+)KE!h2eOLOSn;+z^-8=TYDNy=c346evH}6enFZ$L?_t@S5^Ja632zjSjh?-Og=% zOz|7^Yx@rxo}Ls=Wf|bh5DR!2tAMLVDZsszM|f_xchE~Pf~}G({BqGWa9J@?=(97Q z>pJbZ&&tK5+O7lZ*V*#fiV6&OOvj#!x1w%+CDo`7=d9spXzG~CE@?`zBO>Cc| z$Od!j=-aSCFmT9Daqswnm>r5Z=InN1^Uc0oW}V6hA2(Bpx*=E}>`hx@E%|}OvCGN& zO=gCQG<14%r5DpvS3j}nlO-1& z)tB-@n^@(LHJ_HU_5OLMIq|;;-gNm39Qqti-b($cIjzMfOrMIM^`p#(>x?;PqDAUSb=1U0o@J&SRG+BJ!Uj;AKq=^tG$LXod zaf<&N!RUM?>-OEpnLS3}+L3OwBB`r`_)N;-y3XP2?ssAA3k57bV8XdiU8HxW2A-UA zinhik@y2{V_*YX&=iUpbpJym;y};rlFB9I_PYYh?m*UImsx|UM6>(s{`nz~c>Me(o#wJ|j73KVF5d8ZHv&docXc*$>?wmeLg0N4Oy871h|uRm(c> z7msQhR%CWzej1BOlk@Am|tcVC#)Y z?sl(~cx(+NI1d(g&D=?o8k=qFQ;KlL)OPx;-4|spYvJU~^Elt(v7qkii(eD9+0A6V z_-=Y%u(&#zTk{Gy*3U)sKbXkJhfU?v2S3r>g(GqKj0TwTasgiTo5NH0y~L8hHma#Q zPQDv6*=$V_c`Hb~h+BmqcUcLlY>J?AOEexoI)M*4JA%^d?_l0P0=MWC(SL`!VXlk; zE7+@`sx;%jwo}Skyq8=Sty1^mRUn7`apDCkd-&UEi6sb)G|puov^o}(lQ4@*z8Ye~ z=Rj1+JcOd;@>AKmjidi-BM0L`*t>EF9&s6jGY1;-lr38z>c2@ibCJYXtlB3|-9M89 zBCpbhu=kYf5{sdq)o{Ma9`r4Lj1EtZu}zwE4@%5~YXzIJ&_5E+2P>gXYYCljV>;#@ zOUsHLKxdML5K~el+&f{1@1(Ov2(E=G(4wyaW5tUxv zPhT23;n=ZJn42C%Cy)Aam~tP^oW6*YekG%FlmgDTyei~Ywt!W4$*tC97;pHM%tLJ@ zp3bXn(7L(^vDO>kUJk`xhnH~O+XzhW?TUYN*W;t01ilcUjqYyWfh>k$Pm`JKDP^s$ z&50r7aaZX`-h0&D*A3-5Z&RypD$kjBh!*AD6tdEKp-$y2TfeCz@$MCG`uTMi_lr%2 z3WJ?AYpw?Cd`*Vu0jc~yhR*wusy~e5B4i|_tdv~2Keqr5 zG^~X0etLM_QgO>KB|6e5^cSnRx zi{hbA-H{x1hJatSoxq?Q55ZehMMn8H;NrTTOMT`54XVrO-MC+9cyb5*w^d+0c--N? 
z@-k43PHbtQz%shNiF^5@8J!A)=!NjvtKN9dj_dnF==@;%9oYvX4oT6^?YrQQzp(G_ z`+#~S8nk%xda#M#Enbq73#WXo;7zNKu(y|hdZ}u@y<|9kJ#vSgyV=R&JiV!9XEx;I z1)*DQBxx@ai=Uau(a+dYc$rnqy^0cilYjN0;Ii z)5M|LQ z;_%0hG8((q47VBe;^U?%AaywlwGO27t`YZex?eG!JHLYV+Ki-DLxIOoD8Z7Qk!9}8 z;)cC_&mLTNr>fcKAYE`!?|*Fz$zI3#?gvxp#W8>I%Nj+ib^OsmdN&)tQ=LVyl^PB(&YPquKP86R+*;tuAim;{&YB4Kyv zDQ?D<%fk2M5RE&KPq%KY!`tgda<{`z!NuHime>9nH&wdeqSS88n3%=A{G7)hdOI7u zGh)eiV<~(3^BP3`b5@ypfyd`RRDnCj@oUTkcpmx4#@rLinjO&bVSE1r_A zyHxF_mx7<^Q!1SAZKp%A^Jx$WyOn%zmRJ>oIkKC`@!WYf(jr-874jP;$3#Qqw;&3N zFF~JulC~%Nmf*IyV76Nb0HTVO}Y%z&zM2x z&JghHPNfY>v*_m~dy>|z;zsWh_I!0-EIqp$XTAyqmw;3}YO2jG{U-yTI%D96%2{e| zwuGiJxqM|sCFs^2Wm(yYbmP!`-mxec-bZ=Rj-n6>stbUp!o)_?pM%2R|8YN822=3C z8MOKFI68DDf4}%$+G+;O4Zk6?|D~BTDc!RJkt5Rn(OF2butsF z%%L{H1*2S_N=HT>W=~IRgVw#LBzeFPHh-KC*&g-Gch6c%TzUem*IZ#|>ir<_S~cz5 zJ&HQIht|%}vd1gurSOQkk&x9o$p5GQSM2N}?Yxg%zaF@u||6p!#xgkvp05FR~aYE@a+~-?B*G~GA z=H`>UQ(iB+*>{{&N2s%7PiYS{YsAym=(hi@;8pmz@Ppfsulw`+zn|5E_7 zx12!dkv~{M?o)1EWFhHW6tI}rl8`ZAGVT<7o_{MtS#cLq$fFP}9z2y5e7ea3JazGg zz$o8ZS;!n387%jA#LN{vtf>45UE>$9aMx4_vTQ;1^p$k~q~MsfaihY}|1k5@dC`XD zf`8z_J{Wgy8(5X4knGkV=+j$`DRN!xugOw6Fu9v;jhN25H=HK_$^ABVs}{5Ex_)%$ zKs>h*B}ws<9JcPu#g%sk(*7rpxV^4tVDO{q?9YS-PQvdTY}DpJJztZKWSElgPD7Hi zSjLwu>q6~SpU`@35_7g};R5c*kstzr%Tq3}wv9#)ad2vM48EPO{2w`{~h{)9-K$}jDQv28bmg@8H@Yc8So$^zC> z+Kxvt1N%Kx7rZ_eGHy^bbKkgy znHY_sS-D!+8eoBI`+V@)vwrl}Ud3;_+rgG*Y+`rbK0uqvgYfR&X%Hu|g1&wj02`7` z=#EE@@SZcLfTqR#y%Eo`IYfnGPbaWNiw04QiW$2xq5vDZG@!6?FW+j~j^RJYz_XGM zYypfwldK-5cvTCgf82*_xXY-w%T_dWd@|S>n!}u-1ITUCJHDaHl~Vl|)Aw>Y=Ii+Z z*SilRpO|o%D3uLcA{)>rei%4^*W_Q-Te0w;JK>Xc1ZR;Oj=lJkFZnHShq@ndT#*~b z$-H7#Wy8?oDr1j|jxeRSs+6*{8e98c^Z)L3@M*rP6ugd@uWLQm6*(4T%`da>56d_S zFAoY@K8N}qY{Hf~OCa~aP+V!2L*`a}y#GrD($3Bo4OO*)IOhqxaht$1tchpCUMga5 z!(9|LPGHU!QsDaM9b0+q0h3GI4q~@Su*)oih7?FcQ}rTIUb+)lCHwGOv$o@xuU2F$ znFKY{R?vq9j)Ge|4g&7ILZ_2PRMeW!-rqST9{+bI7j72EdW}M{TF59Ac0CszmPlh0 zTS{@yeLed9S%+foR>MD&B4~LZf^H)v;Nyxq?DD$LKmJoMl94;dFMp0)OkpdFG4IAY zc7Tn)8iEDgYgtP%h^EYJM(3ipY-ILIls;~YR^bu+`ZG`1`Mz;rJt>UEeszNxC6C$e zWMEku$I$FxJok0lBvO7qjB{N2fS*$v$*u@q(0h$itnO$zDl7W3$A$yp-1%bGa#w{$ zEf8kr>?qt?RV5Bnyoi1tmRQ@eo|8IuhOKRLw%Po33|&DLST5}HuZ)Tl=CE??(pV#Q zc)20g&eCI3C&|Ka`#*f`zG@14Hku{INK^THbEsANBU)@y$C3vJV`kib=9Aq}V=pxj z#!pyC+aKoGNGRW9yX5Ah%tbB1(|nxFlQm&R+c?;s*~2%~+^epw@D{4{Sj@CjZIN*@TTZcIYo+>Dc;T7$?CS+QU8EE zR&m0Bwkw!X1diG-m3*{w?S!EVT`)%QK&d=APVer|z||opaK5aEO+8r4 zR@Mv4psXuwwVkTiV#O5NG{6>DRlC#45@WJ(5aY1|e^{9&&E%&kV&m^XFzuZK8r3fu zzgF<8PBMb_`H76=d_d*tQBn#R3Vzf5n8v(t*3XB)my>R!<1YzW^+U*D#WHHDHKGG5 zD!6vE3jDAjuJT(L8&sBud*&B$-)j8X*te%xT>cxRee{FK{GHcm$IPSuK+_Zlp+9r}ES(9^A`8t|UjJnSyo^avUUHZ*8?uipwOJ9Sh>ms4O zr9;H2pG9u&3wCEzIJ5IU#a1oiah;?Q1XbsgjISfA9n9nF5BZCJwQItc`c^)0yFWa% zu!IiB4diJma58sKrQNCOY~X|EsH6FS3KWWj*}V~rUOf$qgO7_Z{*4vpycOtn;wrwF zKa|?vt`b|B?4^b`OG$O4;43*<$_3mQN*;Pb*7e4794UVcRbMzk!K(sB6|LBqx(JK+ z8^euPquGG*`dEGDHq)IK!{*#_f{QttSdljZ=APXH|8362+%IjM$#D@_WY8OlbM;M>xS%hZc_Y zq3L`IDqNGcO?+LAujWd@U)OHD6c`SFaxG!wwvqgC>o0Wc<6cO(`GKp89RU5PM#hc` z`0;5X>kmu7bCN(q{<^@z3Mm-$xg0M%kEV?0ielpn4p^8gK}s(K$HnFn3=?{^2V|n? 
z&F*gebOV@)z#mB8>?(p_?iwqd3apZaJ`;2 zIa*OvYdg2HaV(_;uO#2dXWZ5w-{}2GVaJeJCAj29vfA0#xS{Q5!St9EJ1yKXzkeBx1jB068@cp}s2@P-6KDUZy9Vb;M7H#^P7J zqiGhCGj*l{E5wPNj->qeImU~O;8KYMq&=u-ORYH8WFyUc?>xi;ypqXGvyg6KC8?Jj zXBi%==m)ch)si2>4}td*F6>rC z7={d;Liq|O@Ph6qZb!o!vWlLAd&jr3o^zMP8Htw|d^^Ur*z0njDTD75QrLF2T((2# z$qzfpk@MgblAd{og->m$_|h-W&20L>PwSQ8Lxe1WuKo(TH97=7Y0BbJmu+Mea}h^h zOXv5^WyE@_*(ukD?AhXD%!);`_C?dUGv;!(#!<=W6FZ20PS2sSmuuOR))e@@FO&te zS<%|!?I`OJjhb(Z@lkmiJMym+yXgzMPKjaovIpRkEATX^1wpQy$FBjR7pW{@WNom(G% znkGI~vK<+}N2DBNKs)v+;_m2K@OGIuW=s&+x;FJRQqKyuS1ka!+*>$e;WvDit4fy+ zeB#od$Uvyj=70s?IOi!LHj=gfQRbs@?DC~FxUf16M=3sFnO|h_;mkER{t$`@{;Qi+*NQPrGgalL3W zLxwUhrP1@c!>mEbpIEm(WKAvixkKJ-#Qx(9$W|s6BWH6fLzN z8~I46w{BzeM82o+W$Ki>Ac~miI#zW!Vda6=P3E! zu*8H9!uvW>o=w=xLl(2d>Vilt%8?P3B^pvliZtAMXvn?ZaY}sWZ5NBtk^s30HQ+wF zor|woPgx14X-RH08}sHK3>meCmW7N5Z;1@1mu3VW=e4lgKAR3+8%w4uf_a;=C~EEe z%WQ`{LYYHJ^kZZ{Yp)6h$D6NMdeuSlzb0nALf^x{Q5JryxuQ(+6E1nw80frI&Xh9l zq58rbxWjA#4OzO9vPYHE-{(K+uUH72>KZd!vA}JNDi_5Xj%4wEf*)=3@LEmHVKBWu zQOMEB(1lPLI_|ClrB7!v@n$u!HD8F|i*p4oMFXpLwE7tQ!L15@DUp&VFXZAC6W%+bMe5iYZLpeG@7Sc-7w*x7NIeKOdKhtvjR z&J0iTc@#oXRcSEN)yB3taI~#)x)qr3*5P_8wm@2J4vP@@VORWxEMIUA9{ad}_Ulie z`T?qxKlC4Qny2~btCr%5Mp>5rJq{*X{$LSRjiwvVc_{Aga$@#5*Ql-=W@j* zk;xEkN-kdyk0SCwV)-Sw6*pXT>}C@_Gc-n>$7x)c?`%w+`3e6nFd$lSvXKH~LGV>JwUn?Y-4$$J^bOWMDv1vdj0ClTm#OD)I`lrt z=!c);h&7rpTr~hE|-kJ16I*i!go6tTy92Km8 z3v+)n+Ap}E;s;0Z8-7MXzBAA1`PDFoNhYATCxas8C(?vmIas^w0J!?ekbB8Rnvzh$ zRXG|1UsjHP% z+a`m(xgpxWI1E<$#Y{fG3_nfQfMnQ-ACBvi`ldmm7|)+@9%mTj=L}yt?mm=KD#{d{}h2ify=3FX$X6&d>s7@kHWf`hiK9% zPi~waUcK1{Z(JRy^{^W3yJ7)C5GE4F6AV1b}`>$xstU7ihNnT(q9Ho?=F6ojn~A1ro+hpmnu299AL{k1JOhNDmn;U z)}>Yd*s!57n5G~nu)5t)f6xX>sp6<}z8zj!(Zi|eC{cvxBiua19AesEqMd31+pV|- zPIk^>52k--W_paO-hJgeT^*S1*pIBm{U}%eViG@Yf-Z(so@Fz2I=Fy*JxCn&n)1|( zm`t-K*hv{dq0?w;3ecu4$5h~-aMlRiZ%VI)d}Z*kz3g()3wR$rj!vEY&PL0A#%M!( z@K96Y??C_?o-~@4)VE_*-65=A(2cM1QlanaBk{R{7MA*8Am(nJ%Pv1mr?>tmnD>n9 zxF9|UZq7A_s&7~EiOO)Q7c#}xCx7wc_7=A7Wduy$w2*}j6n6f~PVnZo5%{XRLF+VC zn7{TWYmmy|Q**7@5aC?t;yjHjJ}`?44#uMUpd}cx-~sodM-KmT0!!0nB`Th_qR5Y< zMI*+~rsWey!LOM~6#K9WHjGk+@MoGd;A1|nnl>4~R9T|Arl+_yax5*4Xkifsl6=C$ zZB+f&lUt)Qo|N^j;gZgCaJoO03!ABrU^X9;iraxV(`8wo*Mq^BW_0w4ud7t z>6FVj?7v{kC;eDRu@<4=)VZA%DLL5;Dwm)|ea~1(Q#Oe$!s%y!8zvu?gDO8of!DW> zekp3vcRMZE?miNRw47yaow6WNy@NQf|5#C&KbaR;(94f!>79gx(2-CS7le$aypCqP zUVR!4VSaScmfh;P3VOfV*@RX_c&+b<5nERC`Ztc?m354Jo)bog-X6iu3&;5D zN3CIMx)!zgMUX{y2JK28&+Kf&(1)4wiJoK0HYFNZocjVvppCvA81}xV&4nw@$ zXi7r-ADNSY@W}}tr$-HXkh)>jCPprWL(G*=(u-=by$yvvXluF zE+y>GmTAG%Yxy)J^ALTP&tqvVmw6+%!%TEbfvp($fyqp(V|uxZV931&l#R9{<%%h6 z&@p9NSnNn%WM@#$*bI(;9UeD@Dc zq~EZKxnohch3rnhXO~7~z>HCoP{OQ9v?1pw{!#R%1ur}K11-U< z|5P0|?ooo(yR>0Q(Gl>qNg+A2Y_y!K3L|>PLF1Zo&Otew%QzG-I%^%yq=igd-$ywR zop)uozJ;>+DrV5LN(sLtTfl_eBe*HQ49<78VDGVMRCcbNPOR1AOb#-(>G=mN4U~X$ z;vuwd%^mi%ZZh2o+Xkt}B$!%{BMeh|!JYB>K~F3~@I`bnZ=?DZr92J5J2{$+t^xGr z(IFF=ZXZP(DY-V=CWNE)5$TU4{6?*hHX92S(7 z48OAWunA$Bn0qCP9eh3mQ@x5<_J4{%dMn9uvIcQ;`lw~rWp*S<3Ek6;XY_$s6s>spr6;1GUVh|Xwv4KCX!CXvcJo&!5 zz-~os=O?~AM|JB4P^^0%j;$&~HTeWAua~F%XO=kGp^JsQ9}icoh39q8U2c4lnBgIT zr))5jZwgJqmM!-%VA~4p^JlbCF%;H?%z(s#$@nc|74uz|PZmpy(Za8j5*-TYV#!Q# zp3fc_qCAE!zl-4BNJ-(e<8t7B^9^mJLgro?56sO9!r$hwyq(dkXRa`-Nw%Yyzni(9 z$|-zD;|9U`H3nBk97D^nA>feygEPF}SmD_eXBmd}DClm_X0MH24u~5;4!_ns}7pY`8OSH9k&i<5&C5#D#5j z=-9pvO%6qXx@{dN(Lb9U#R-J!ev~lde9cyk(RBAoDVp9B_Fhu)5PB>TTTTq5`~3+t zhL?ou?a%OI;xf>j~zsz|TX3&pt{TGd3r~fEHci4gAq9VkQ4g_Yclq+QrIW zuI2kL-siRQ71{5e(@gK~7<$pXU!?0C1#PGN;l6nYe!pS{9U^%OT9pJ-db6Q6@C;L@ zY|g)RGHsl;ka?DAa25W$Sd;oKl>gCzb2%)W3j!?E?A1EJ^fFloA5x1+5zUBn5dU-=IcX~oUyOPn;Zv=Xn-GFX)d&)g@5>qT9 
z!6{`XjQwWFE}p1|FXJ`{tScRulJCMSAIrj?YjMo_?HS=Lk$_s0LZNoK8P2uxq#eh% z@M=X9Ft%qR$!Xuz+acJnZ2P%W}lD==cOU6@8ov?*nmm!B@1%`8j8; z+>QBTG+~^18ybEn0GIaR;xbnqmM%3MHmT1g7;HpqLmx0Vqe+-^(}xXmi^3sVC$J~5 zpVfUB4nC6ZY`TgA&3^F)t9`A=dW{#oU2>Fmxmx0CKR;?)bDbi!*5L$aX9(#^hR-^) z!9o2JQ|VEzwXg{y%@8?o%Cdov%^~z_;3=Wwp+@ZwcTsz|3?&T_;j-Hb&~rVQtV5GU zWqCGiS=BR9v*;g_|1=p-$84aEo(f7n9E7GP7SL&_J)+YWV)zZCUqXGxS>`{mjw>h? zGONatv})`qid$5`)mRH0dYfcW>XL%hQCS&F zSsfwTvOWV>PYQ#r!y8zg#d&!A<{FaD9Pt&`Al9|hlh*FlC#hRA!P@-})61WViCZYcQg5mgAJL}rCP2qvW#{*WYHSue9k+11Mg!V%YNo);OP0%(6~&2nM_#* z&&OM{l23iOYV}k$FjEmq_ISg<)G#uYTqIih%8<$iE7JhgJl>`Bh0X7+pIO@1F|`IG zEg))kFW)#vjVzsKF#~}I(Nz8-Wg%fsoTXZhC$zp?!$li}EDJDAb88~#3X$5m4;sq)Wf zT($5nm%e5fl%Af&HWZy^YgquBbLkq$YTOn5d+PwHtO5sZI>0(Z1-7<=I+?;mcGatj zhQ%FW5s|y0MP3~*`+87U^i#}e^<-^BBPmi^@axescw;2Y%4_$K&YoaW5Evwy_Q7C$ zf@fyMYoO=ucP?hg1zgoy1Lr=(u(c-P@FVCv%lM~6vu5g1+NH&G`Fc9sGEkwe+WYwU zM>Jb-*b~~N3~_DKU9SDjO-@xqiXS6ohb_K0SYUi1ST4B+_r@oX`yO>1cK8P?-dKlv zDobd_^9R%=bd|mT^{@`nLh|k{W1i({bl*T^`>_?_bb*wpet9Wu+ZK;w&&Si*zEQSq z^Nb-3huZdg-=y#@9e8BwTG7j(6x6<)1~X<`;naDxVEnNiI?WGKmiZVq*2s^z9Xz_N zaEFc&1=Os5f-*+eK;`E1wDuZN>cSW3yuF5>`0oi9Q`SpZPyD9$Jl-9lj_3IrPT{FH z_0FBkw{BcUhQG{UijZ574Ll2;qoy#!L|GcuRu0Z3rC=;o!^yb3W-&OMoif=-VZ&tL zZh#+IwXY+EH~w%#WeS`<7)LXTOCe9;Gs~O-ptY-&FHuvg754_w@`EBsU33tmH+`aA z>n70tW+y`3X72U`NjmQyL_L!y(aWe(n%5z8tNT^3bZa7EqfM-NT>$G|qYLBQ zIhL8Vm-)qqGXt*+q~J6fZ9J3sORFQ{!rDkosT^AS{`wqH^G<-2f6sZTk&&dS(7-H9 z1!kd*UdP6gv&afX}rOwmAJFSVveu_v+*DYqKYAOZ>vDJvCYCtF7GFC*zoT z?=VVUms-t$JerfcP8NC>wDow%>Y{&zi8N%z5zBLu_ExuA#vVFu46?G8hkp% zJ^WMyk(U5wR9CQnm`{&>>Du1Lcq~%XWY0u?IAQ-1_VKa`m`4A`eme=;3orK=8Xx2RNvtLIAAK$S2 z_j2%M>P#rwnhhVKi}1%wGk7;V4wp2kqnA$D&;-n_Rod| zfB)iJ-8$C#IDvi-`%XrWoTyu1&VAl@lFmu3g!b6!v~aEnp3Y8YM<&c?FE3xmK(7bv z#j_xM_S1>k2MY7@W9P)V?WH&aMnP`odzNc`6#9jpw#?`W?5FP>3gL{wsiB|#o>!xR z`^{iWvK`)4(}WKzrr~VgzpUZid^n+h8T`A0XvKd9RK9RAo9C!NM&su|%Wq8>cwd!S z7Bxd>O)01iHK^ShG?>;@Z0F&s0lXgF$#vBT`P1D0P&Krfxg}Il@Hk-?5b4Hj0uRAf zktWCpF1^3&GwF{Ek3quz^pEjuveXlvlhMav{C~l0zm=DeRhOgd6Vf4XmoA;U_XeJB zvamId$iv<0iz)tgEH|ez3bkbq;KYm+nqqiX$RWSQ!%Asvk-9mxKX9RoPKRsMPUO<% zneW#2EDHKPQh{+M9O^$?1GUxJ>QZg8jY8!OuTjv1Yc5o?Y zL{9{sm~J@e|C6YlpL8FeR^^k?mqxJacnog$a#PFMrjzlJ~KDfzKb;h^q=B zL0XudEet(`?{_IcUvdwoHp#;6b6M<%k^ooWRcf^MlqsK42(vK)q4EG=Je# zrjjp5Q;wE%xxei>_XR(>8hvS)<`PadoEe#DS7B>G1^t@piWi>>&)S0+N*2%L4ed@s z`>-gGSzk&+f@ZpS`cE703BFkmrlv?1a}v)_BvA zUY<^&jE-`Ad?JiAuTF!hIh&Z$psj3jaXp@0dy-q1v4JW!2z~6@6M$NIlX4U)LqD z*M05GWmpQkyp*Al-V%Z3UPxKZMFJbu3LK0RG3ur(%Zz)^2bC6)>Slj7LDLuR#O|TW zzqjFKk_N46=n=KL?qDmp@2IRig!Fq$>EN7Nxcb=*vrdR;uGbmZXLz4|`n>|Y<6LOY zgLJk}JB9v!b|U#%W8skCA5oSnAu0c^h*rPL)2F@Hu zLiCbOZP8_i5fk{6I!S&mOS@2I}^&9M9 z$ri*isnrnDu?chpkKw#e6T!*H-o_>{0^V2W;@CbnxOU(XJe+X=E!adpZR2BBhJr^a_uOPDw zbu^ry&rS`PPDb|c*l4vZe#W!W6r8#mKTRJ2Uiq!mdaDzcZeIp#we2bW%mudn$y)ka zFc`lo#0!qk06Z0EL{lHcaqGlmKq+P%-DGcR_o>BD*5HAA3gfAH|7B?YUeA)GqQPfv zG3{S}1z#Rhg#)jp;3z*2=6kFo|5RzT+dimPZS6S>n{yOaTnWW&s3Ws0Nwm1-8rJpB zpvNypz`?DuHE$Be(0?6@wD;UQ7WVcK8N?aT{D8UmW9wYPwq#k}#_Y_GNZyR^T$a=9;(2UmS2DY)cbeN=qZMTr zaQK*PR1OcMf}OT7yV;&9BkY-NV>#=d(8up^J&T8fM$@f5?p&FBDn9PHiuZMXvvr?| zs!v?!2J5;(!rMzMt2Bj7R%FqykaQd`WU1D$ybdOSFeC5GQ* zvvLl?pflU(s)qx7Tb^Gndqb7>#2+HB?%OzwR>8{OVs>+31i51%d#SO2P4pW@Dcu?P zK>i4rd8pu^&xf&S;u=goC|6Q7T=2lIz?J7t(%8xh*!tFh-S5psgGgEU zQe=R=U5fOuB8vZPSPzx;vl;zPCa2*&xWGT1jQ?e`G=cr`df05X>{bTsoMVsTlq6I> z9R%m)AHl@H2fS`z2PysAf`wOG*gA_sGSzp7>CKBs-=qhJkJq4=gQX#oI|9o#O``D| zklX6Anz({3+h+GloyX5Jh$nS7i5u)L3>GlL=VhZg&j z{s8tK`prfyPsh6RFF3YwI-Q$ZhiliXz~N5tGt&Uw)La9={mYTx`)m|ru1oDJ{z5KLSS!}(v->PY4qTDHse(}gx!)NN4=2( z$|sC{Uh^1Cy8oe%LN!|)qE2g#Q<3&g!`xSCFxf+u?t3+|eQP7xRlJTG0V`9us 
zEp+yHJKNjW2EyVC+5}#e_!yrj-uA z`^$Adm#oEw#EXGiw7Fpr^nbj~S7bPJ03H#L_aRp9_@ zT$C;_Jb}HH+{L_|ZpDxl-1IL`*b14^xT5hDdc4(R-Wr9l-82g={YT*5 z|CZ3p$8t7K_Al{k|5NetQU{@jW=36por2d?3+BBvphfA;;CRFbau(F1^w%tyd_=ys z$h#f8*80!@4Uu^C=5@d-Wg$*Oo2IPvV%lEg!0O8u?%y6wdi-0_*3SM1#E)~r16`Ti zV*P(?$k{jCfA8zS^ZFIIxM4QU_s-$xNAH5~Kcm?q6-he$Rg)QLodly(@z8xNmV!(6 zgY3dVwCT(V8v3h_)6%^PU9JbYH(TaG)f5T(Fu4go)a22UcV6s|mo-0Spe?rhwGbaM z6uM2$(}R{hG%uhXzDC4w*^xQ07C|n{!X9pKA)@$m4Tu7 za`^}fjI^ZLOBy+G<5YMo@r;#EF2HRg2Gqg}Y1X%6DonnAlr1vM=S@?;u%A!%vSsvy zDrFt9M^h5Br@dlLW3rjA)I(;zUBcF+{~=Rrn<>s-b_gzgtc6+I?uvZmEkxnUt(=kM zTkJZJ2vs8^s9)Gq8zgO^Ur7r_f=*G*4MRHPqK0O|*u+O8hkd*39n0&b>cH zUu(MYX1F{J$yrO2-w$N*Ul+5OJNMB3>1!0F&SQfbM?>+Orzpq9L58iEv>itXJ#4Mo zuXVa~H{^+ER%sm7ly9JNml@O}QORd(NwEBN>g3y^R=e`(9+5{$Ki};-9i1DOi1PJs zL9vi&ipVR4-=@cL)tv^M8dk;$XMMcY*Nie|j;yNtJ^OgOfIU1aX}j?3ZK!>=kL#&X zgh|SAY>xgHN|*gc3)2Gd+`~hZjn z4(?a4@CW|Ofsm(TsIz4&ORhgjO*_KAtt_Q)TN~-LRw?k8RVhqLgLyx{ z1LO4;QJC}*kgS?XwsB+F0CRJ&)fYOF0)6r0x@2rxCxth^EP-n2KO`&c7;6O<(WG5R z*wtZ6$=Fa%@P$a(#ynU`k7Y9O<-J^ZyLxhM^UVfuD*b>rqjTV@B9P|WT`+N1Gn+3} z%sw7Bf-yeouzJBhmS;SbGP_=|^D}%QWb8vB*ECc7dyE5r{%i)T*0@VbyW%M8;TPu9 zoyg{h-B|x0J*xY51O_$#g5kcmac;>l=F&YFoZP!XWvmj}O*N+AtEprikO#kZKV|p+ z5xUtNC$H=2sOD)5*#al);UiaCXLOh*H6&7kjRE~^?Lln^9U2@b+!K$a(}cyRN#kiI z-Tf>DXCHNQS37Rv#v3x=6qm$i%5J77HhVsn6xI&eD6(tTh8nRn@AKq7|1CG0 zE~HMOg66?^Pv-?)D?0{PYQ5O7OCRxv@o89wckr&pOnjg=n+}DKf^#)4&}I}s5A90W z&5(c4F!diEuQh{!TbxO>qP97h@Vd$C{$0%-QY9W2x$Dw7}cS<(#8Fpl9kVI&xzhN$P(lzx(HKoBc-q zouvt!O|;^Dy843C^i0-FHkVkPiZn*Rm8|Ik9sM-gjqbHKNFy;5$#7#ML z%Vj?uTX~EM7o4P@mzN8yypQazVJBMVJ>&dMjNtjM6?ocmI-7VlRQL^%#g{X^S>Up@ zys=Cv-f2EY25G_c;b#FVhAF{eVYcYp@tmDZS;-`VmcxTqZ5C284~(AAf{P`4u{w7b zC;eg|o1WndrNc(j`hAOF%DS73EAC)cZ(L~CFlE|2FBuD0XQRclS)^MK!-4}Q!^Gj$ zu;X_q-F5Clm-FVF@$q{s_rVoPWfqXW<}}+awHrRX9xkq2zJ%YIdl2JS2rkGlXQrH! z3>sU6%vDeX{lI_BY-JPbls>_<0a0w&@_uaKJJ`?tXIYY85;@;dC)o)etazw1@}AZB zRrlA@O{XRn>OeP7omkaM*E`{bw%tYTc zMKI=oy}&J=$AXqHFw=CvYu7)p>vmrJM2k+GL*V<1UWjchKqVkxmVI;Z@7GvQ6!4Ix;2KM|)rXzb3$aLFD zick@u-PXM@qiZ{D*AVV+xq|O};BYc;X=N4xLHK<60kO}67;qHwWFcGT3iI&S{KB3t zJ}mq;hG~tY{grCelD(0w9{+^By^$>JB&I_3p%>zso5OK~4p53hJnivaOOfn6CO+Lp zmGv9x^F3eGmiK^R#dlG&avIyO9mVEs?cv?bi0_$QBK|n07>CabgW|A}=$=@M_8*ti zfU~u9`@R+hK6%8Fw34_o)jS+oHi6_K2h+M&8mzl-0@w278=w5F7DE-rp`qRh<`|(s zzV>5bhjkrP%Q(}K{xoO|E5|Cs44hDClJla0iQf-mmP zqg`CK(3xvaIET-QbSUhMa94j}N_RdLFzdaa(IH6^%0KLY0+|-R(C89>VAE;pxtJoF zZhrx-XGqbg-f#FAOu4}o+xYmguei+lethCx^4y3jfW zH!)SRdb0w)|H-r&T<3x1NyS*X=#HrThb_JR;6myiZeSSPfaOBwr&n?Wyv*Op%pYd6 z(2zoC(>%qOjqc(U{PijMLlCL=Zh)f4Pk8&vx8l`8?&;6(4*Yp&Gt-E_&41V^Nk*DR zLXUR{^!yymm04eA!v{?SY8PC;DNnhr9@jW;-<7y^-aG!a_%_==a}P9ZtK)P1W9YhL zJ`Vp+o|g{Sf!($1+0JBb{!Fel4Q67d;oQiJ&rYKCxRoufjAJ*3D$&DTOX0!PY&5A_ z$-eD0#ZeA_S%~~(>`3xN+ecgBRKa+VTb2t35_Q-id5lsXPN9VF)-dU!3k*f|S|gVna(@23OcH_MS}1>U0ZHWO(11y_(Tj$->ZFJas6 z2E(gogKAHdN!9$k+9L|QVJUR;N71!+)}%W5BL1}rg{20m0=D6ezX$!1r2(-8 z_-7^MoY+i8Z%?q-ii2zOe_sWix7*;;%Tk)KqlPNV_ra%6f@9#vdP*1^1cw^y@WQnq zIHK|&J0LljHd_w@QP*7hz2rEJe{Mq=65H4oI0*N{joGXyPoU=xB-vyR6MWZlgY;*i z9o@iEuOOx;7fx%|7SPtPY%HjhV!F07sRDzk&8Sjfb%yY%jb7vwkV&DNPjinpCsXo> zIBL5iu(vlX$4O@wv*BL5NVDM?Onx#PIt*f&jl^ZVKIa@{%70>V_oPX0`&`oa?>N0ED*wPt1eV3~^vN)3+I~`aTLj(n zyI|C?H}GXl0z2ri3%;h^r*|03yryVU{b3I>akPc92gC8OzzDohTL~LmF8xo@dH7@b zesMgJ8JQVHN{Awb#B-lRiIOHs+CwVon@Z8NG9zSW7ZNHJGM@W7Qc_eBN>h7lFAe$K zzdyk1W!?94UFUp0?>9eg`4LES{|Fr|srawC5uv#jwmFFT`yQ)sXtyg{J2y(0ul`^u zS3}sRb>S4)dzbm~$7o^EH)^gEv;nz$=%~+04A^py4|g)*`oqqF;f=ZY$lo5Sx0;iT zd=Wd=9!q_57tyYV#ax$=MUET0p1T_6M!WW(VGeJV$mL!jQyeoN{5Q+vR@vitz2pZ! 
z-}Ox0;Rrpd_uN2HI>v(IrXKYB5F5irV*e2tSarmP9h%@l{4RZaf74Wo-gFF&SB&Ix zb^fwQ;l3j|+!(&@n!#r8iv>+N?fMrZ;_;5lf3(EQh;GOv(!kawSUyy;{_eIVtgc*O z+%~*r&VH6;XZe73k8ObsL%gs-b24oCx&wwRJPJomX5p8zR(f9&1->GGEL-FQdEW=p zyr=uNK%7tAT2=R?x#skll|vkW-}sc8|0%Q}-g*I($F%g-2mzNDj=o zoW_>zo?dfKZ>xi zy^KwhYGT3j;*BGPC#4zzP|DDXI{U{rd2)J#C zFU#5Wot3{ohr`TW*c3B+rgo>Bc?b2ZY(tvb#&D`sG>X+L2Em!~NKg zQxjn6f6;V%(>vDu`~~Z%?uO&CCD@#(N5%hSQDc}EuDNyy>&wKjFIAV!Pk$700968C zO_sD?5qIo!3;P;TKzT`{@loA&|c8~q1QCxb_Jc+}03&P_}M`PO2Tb~%sp_H9HLoi*tEqY3K7XX(PoClEP6 zgZF@7nsI5UJ@?9)@J1B{b8Gf*p=;!D*cz zr`a9Aibf{mzN!`E)og@m^9Ex3s8jf?@+?I>TS%P5Nc(R}@9^R_Un-6J#9S&hS^3&y z_%O^G)`tv1ZH?7@;J;AbWfx#`^H3Uf`!X0hZD$QF`)Nj)BF;K+md2Y4Zm3ZOoc`Qk zHlm!T&V8!b{5Au=NFuWkShK+|TUqH)DVFan%WoSKLM6^|?3Tt=(DSar7=cG58^t)? zHuFzLLec_^`#FhnL#}hdnZZnRi4VlTO|J90Z-`U+HnR82jP18L??8ooeca<} zPGd78*^ZVf{_#|Q9P0lTLUQaWW6omw@T7w+O62UC(;0+SF2QrFlwfRu9IkwA!=CI_ zwD%daoh{j$j2f?h!4;J($o|UlJ(_vAytfCnrrzf>WxuoSd-7?AsS{g#&;i#Zr3>9^ zV^$g{Ndxx_Gv?1f`3K&?utxB6Oq)N4KWU+7zj^LDy56~$Yd3MF?><7u>QOKKtz8Vy zuJ30qX~&^fDuewj-hvM_b?nbutiq7vn^?E0!0;Gp%(g8#gg^V9gVS7H6xG<0!pz-l z%ezw2d6X|sbw9{k&oPB<5!RNw&F zCL{kZAe5?W2b1o-C1e;o8MCPq3ubt+XR&<{@ogO4n5oL=P3`7B$y(6Y_nX-C)d`d+ zw*a@M*ioH&KFU0@z?qE#m%)4sFCi!7tdvbL?_WCU+zBI>0AdaflgPN=6~FCBLPgC3 zY{fNyx-PJMU)5Y^OC$;*d5Au1n5@fg+3iNppVKMzLJEZ@%q0msV_evl2f-c_PY5Ki**&)jHi4c%CaTdFEl7=B9`4dgmcqegMd>e^v4J+xLmwEi|aq zW*7x-T`8IbUeH)O5o^9$(&Ttio*h`{fjTWKu(0J29g_{G3+K#8<(dsSpZW!vuYKw0$>(s|s)!pXC-^{` z6iH`AAdS1V4+m(tL*AM*q`mP;gr)PPf*+vICe|Q0UULIJl037dP{e>kJ z9ftACy13Fu`vnJaASLfU0P2ZyXlkPe|23^e{`)g@9pi+LT$bSWBS|zb_#|joj>5ar zT`W=K9jSOf7IGvpyyWShpjLGV^#XkHQ*Ii@bpn>$=IKmYmb2>-kA^b=nA0|M7yUs+%#{^shMP_iUKBuwFdi!5`5~^)B4`?ikvil)LN`;_*2SEK55XJ3&**NbPNLs4;roYc+&SkY>JELvZdz)=vD>d;u6HV! zyuF2m-u^nn25#G7LDRjbpqa*0qPssi_xF3) zimB_xE2?P0Wa&yK!v9*jg0>$+8xGGlKo@QzN|nG z>PyjPf)%wl`jfJWEM7{u1f~{+5P2~M<&Rd9MZjOk^ymV=b*Z>=$1YLH;c)16wWP-> zbLrygSL}|~NRT+7WPk2PC+*%RPxkkp!oAcPEFmS4yitrecY#TE9nJx|~Q?-f||<3Gbt zrBpGyB7bz@c6pMc!N!I=0jkvmuE>=4TjWpXE5AVe@Pnv-GY@a?67FX) zJMl?Pe_fpX5sH5y+{+hAQnXYXE)7(}_g3)`GGq@H_ihK*#Xq_7XQ6md*9GNf9)ix5 z0c_3$b-H8tj!7K}##=Us750jhp}B|~b$tw_9eK=hvP^JjW-G2dTMjDUY)SmFjEbK6 zplpc~JGn5B?X*+DSHp$pZ-+6iaC4>E!7FIQ0#)|n8H1P*WvW_n3rr}!_j)(Nv^5HF#BC}} ztP8t)x5>k zSMYORFSB`*Luwx?LDYYcy?m37*F6pJ$?kUEvV5!HQ>&mWx!+;t{wmPh^N2w) zMefPBJghVL25pM2?BDG+G@O|PlNJ@=*Ez9Jkg7uE&uxWw^fwS{6>vgwCP|KWq~q>; z;OY!FytO%ojUMcPBi;7kstrq+W7tsonGu0=7kT4p&>i~BQz&r6d<b{7!UnX(*HF?t`BL zDk1y(J=`*K1|;sRWt9`Zc-&OI8kYz6T7>&k$Jm%ql8v>SWLNSW^MFg@P&uqfJbAo%i4z`d7adf-a`e*-evZgStBN z?0XL`{n4bcYzQW;xB^CLn}iPVS7!f8U^u^5qou+b4qi61*9YHnQhP+S&M%pz9dN+S zxgLC)khR~qc`6&^AxEz=mtmMqBsD!>z&tk%qJ=&Z$g70l_WRYaRP{XTls!bGRDg?j zEEGB>dgRl2j1O7e!Ro?BQTRGbEby-7je-pj6ddfUP8;E$Rac=iMMQZAy5Y{A;n-2- zK|{UbVdC^E2yStqW!I}Qw9SF68#Hiwfxsz$e3M-dSkD}7PQv0v+gbCk*N~QYnx$>^ zq|(XXnNva)>w0zr>Jx6W9{VQNX?d1cS3J&q<}{+!)wQ^7vNNUIkEET|JSnbw%gMNC z(~ro0u~}OQ^P6eQU(oy{{61f@_4_N(|5qlJwSthb{|g7t$xvjDALLw!fSo6@Sapm8 zyRbl?(kJexwF|;9ZiEDTx;T^yHiY98aXEW@CrLE6+J=>j_Ji-<^%yaP=k%Y2F{NuM z!nolbe?@B&Dr-G~%uqkl6PTJxVG{_ei@8S0DX7)2kL%x6@cUIBFs?iro!hkW^w+Pv zR$K`i_el{=q8DLOhPJqONh%Gs`Nm#u6!`w?517H9fo$(83urWW!2Nq2MPoYVVacvD z{MQ_13S4YS_YOtlT!WWF9#Wl7sQ(pzaCgR^w~n#>Myt7<`?P3acoC~x8Y7y#;WhKS zC(jOj;lW7SpYoYtt+p53?5V4n?VgobTPDXMaR&>udd%jl#nXeV5_{*jmQB+} zaY=tJrH+5W{g^$~oWjB@^}+DvYxsKH z3TAKefemXrV0&RYuh)77R}8r$@DOTP{Gln#N4uIuCNAJ39u1{~IfwD*!$kHpz7Q5Z z2*-=gm$-&H6H0DfPdSSX=#25q8n_N z?O0kd-~}`4sRiaO%NDEcV&4r+1()7Bt_G8taPZ=CfBfO~GX8LzcpftCov7*XJWLgw zXdaT&DYC^CXRmr7-s<2<4XM63b7e7J9%G66Pj7+ubs^VrYAg+J951-NXQ6D%4pbUF zoDZ0OmsWZIP%F!k)*kuHb9{ZXVw<+=_DbFb3@fKPps=!9sRCHZZ 
zM5QBh;qRen_Dkh0Z*yZjo@xC671_XMUw#6Q4xD4Jz=WS;96_HZY{Jjsfux$&#O0_C zK!=}Y5I=hWT(~t?a3alv-rYtR;s)i15&Nc= zMt=S2SkQQq*(WGso4XwDZI}Ve)qf9g8hF)(&ARUd81i0V`Ct-%k;A!*V@^ug2~j>w-9&vj1YE0&$vt67F=ys&4RAT zVE)rUUMp}O=6bcj^iX{&?VF62GVOe}O)kx!AI{FST*A=h)uPq2C&0>A({bP7JkgG@ zRa|_y;6g|!BAMAgz(hV3T-yCzA;gvIQcirYd698POsE%swxKHu{c-0he}QfN)M95Iv%9R>fWJ{_n#y z&?l2i*(e38-`L^xRAXpLe+Nk}LD2kY57nm5qmhfvasG8Tth20#PfDS}PNPH=H9MIk zy6(dFUPYQOxI+@#Ke73;>-f-jjkM}tDqeo%%%J}#e|MsU{fjaB^eyu){OivGsR#l| zohk5$%iyZ4Pw*o=t7wjNp165zHk?*UArJpHJ}9^fO1~yjX5|LbJsLvKPyJ#OW2{hG zeQ3S)9bdfA@)e9b!@({zQyg_^Jb4biF3gh*_&WoPP_LV@HGU&;#XmWA`NMTC^4KS4 z*gcs>4~Qa3BU$!H;DjFjs!zXuFkb!JWqx7IJw9G8g~@$wqd`Jn`LVE%RCv_N85gc% zhg=5ZGOZ41mK3v!P7ho-QghBzB^284 zjiUBLtMFa(C%$WsDK4=ZMS&$|?6i>+r{#B$ZMS~R-$=8^gv7IODo+_3__r`baESbv zaF)IPI0|*vrf_MK5v_N=gf>wYV*?(r=KVF$QWQf6vV7@M>sz)cLl+l3{!HZwa`oN4 zbLr&i{dn~~L#h6kyo(=D+lQgJx#x+ch&uw;c^5(dx z`V6bQ-vKYv-he3Pq$q2NkoOKcK|j}3kiM{A@K?%XgHHxnn}(gWM@< zR{=XM-i1}wvgpy|B`_;2(L1J?cYe5pnQ5g#zStWVk2b}5e&+-S*>BER^*g0*h=JRN zni#lZ9$c*Yz~rW<@~_Wn(w?GYV4%4S%+8C*H-h8d&ELc#(~FqW+Lf$KaOkSX>QS(l zH(RgO0I6$8RC(QzHrtNC<9D)A*?K8Ni`7Y0r-kE}WV3!dGn_YXCj0Qp2hEEcA;($B z5f0gl$sInpB}*Cf%NxLZUo@BUN0ZfQR`YHX{K-34aDx0b;aa4hvoSj#GMQD|p}uFi zFdt9mraqLy!Np$aC(L?7Px?~ea&_vJ)q(dzl32>XF*QSfc|ox36w1(V;$kK$!?ng{ z+LFOhw$Cz{+z^3NLvNwpn;8D<6k)&LFpjq_HWj!H`LtAP2)uE#!!yfnvRrosdUW@2DY2dqp zZ+!5yX?S44V%Tal0G%#RWAir3vyRF4*vXD$!E-~LYP2-|S{I7f3!La%lQH=`o{UFg zWTA5ZU%^Ke3s)N@@TB@)?0lJl3(Vz-P9)IiJ(U9UUX>QTh+=rcf(Bi=k0+)|kizdU zQD?psTQKMdYdm+J@3z;3D))u3OrnV9j49*KPwHp2vp0!VUH>t>@*duOj)EzRZ?QVh zLah7rhM#w%mp|B8%zRX2S+8y&rf7?dQ3mowcza1*1$FuvPUm@eJ68YZJ5*113 zi8OETp{%*40^5N=Y%IY9QzdxMeFt)$3E<{Eky2hCV|8jT@xp{!CVKBoRykWa|JO1! zXg>$bCf#PnYA2ZK*;}wj=Oa^-FJz%oRa{-0DQpOq!P*?bixiq8mL0x|f7vF<1>5?w zUUNGt`Nr6_M-%B-j|ur<5N^0WoT-dXCWEQl>Bg*=@Xsa@lO1xIvBy{}_7`>}_ucXB z+%P61Zs1J?uKvFKJFMDN7fvYkGx~UkT~Qjs{+L$^ycbVu{nS7XTRyONiV;+^u?|^g zEZ$NYjA#9M=8z+aBa5ct$z|afTQnA3HJ$jH^HzeFB8>ksNu3tWlZCQZtq`943gR2o zVf?Stf0(|g3vphxhJtAcP+LlNfW#Bqv89u z9Og2~m);2Hwg|l!;-&Ek!W?rb?oBv?zuv{dpR3EnSKWT&-)r#<>{V&&0Vz@5aBX^! 
z!?RyAEa~R1O!`@=#C9v#!uq45;Oomb+>k@(C}8v<)OI6iS*$}(23>}Cbq9F&TN5be zm>vbRt*377EBvuZovf!J5ZB73Gr#DI%yW$wTDwLe8foA`p;MOlNfwX$<+AmTTj`zP zWikDlhLcRz5c{WskAiBT-bR;7gc*E6_EE02)`z=tV-!vd$mQFQj{pgcoisY*JMB^% zj_s9`(IEIJ#QwCPaSt-6;e$Grr16}~)l~xTFoV7aItm#Xsa3cc?k z;5H!&gZ{21&#eX|cPSpChFFqQfi5-N)x(O}yWsfP2mFJ~KKQt+MU>fggsnF}M&Wi} zK&ZXKfZlJl(}Q)8uis&KayOl(e7jpp1@2SH+y*|NE3L36CB+F^9=v zlxO0`4eDD)8t&SdC^wI4Z=Z+hXoYubGSESoeQrn#VKsNtLFJ=7b8Vxei-If0Rl|zHa@0<>Au22tBwcnY;l=JZ5 zWdlEXc{z3Pcez)p9ND@~Vb_9dSlxR~{`vG{^h-Pdn>!Cc)yi|0$1 zzGRjST@cal2uc5kjJS4*N`p>Vn+10eKOu(8_jxr4cJi6U)(Ue{oF`%;WI6I zD@nX;q-FAd;O?SiiY<)Bd0kJ~m@BO;^yW;iQhpUH{wVYomSvIT>os)S&z|&zuB6cf zfpPse4T9o7u)|FmtY?}r>pUL^17B4Lp0OqT&gq}nsVz3>v~wG0-Sr(+Bc`D9!j-5# zt_!#5r_-Sa`OIVc0<<05#io`xp?CXRy3%e+-#r}Z2u7h5KMKnZHSn3u57BPg4)(`l zEw#>f5zc3}wU}Ol?+;3_d9%;3)w6xj_;CZ6DW>6MJ4@!ECyPDXAH&8yNgV0F0RwI+ z=P|(wj~7nk1BG+4PihrO$S!48N;NF%juk5!w@926e-x(G^Ac5@I~`h~w{afU9}lEyj;^RUp##P&FXSr14JbL)7U~X^;DNW(nX2a*JY#Nw z9~(O$Yw|#R^(_WZH0?yc!%_kV*#~M?#}X``j(3*LN69%yDeuYy7#tK&xlso}w=;~q zWskFAcVDsYvVvj%)FQnzzE+8aLST=QK_ZJP)J(YE!#*G>sh~gW8r3WsVO9c1_3cSDA&xz48-QWktRUnH599Ao1??pO+q$#x!qzOBcdD8V zUBnRg9l{x(Pr;5p;q%5#iIq%VP7~*kf&sT%;iSR|I&wOd`fO%Hk9Qp;Ul_>jB-il^ zcZBf)Gkw{&>S)+DrT}`MdBe{37kFON9lhTeBVU?LP5OQ)b@~Il*x`xI&yTU;-_@|# z>H#>!eFyO;1w1R0%{(UwJl|dap=%X^%JLcXq1O)Eavt(e&5~g1Qgaj+E+k%G73UNm zV8O+{IOj@fR;iNUwnb50$n@&aq{ zr84=LGWH_Ag*ghnqsZnFc%wWMkLxRuzg!Yi{i=#Jo?UEl&`wC%@SfY-@RX@$U1iB} z-e@j*iphotVPMrISm>@yZuL{>i?wi{lQ_>_bVRWO^OB)5xq&_V6olQ`!!a^*5c*Z5 z!`1VFLZ3<*b^A|>VyBoxW_B`oOV(5GyDSzhFqOX;%%z!0&Nym`D%rS~!q9PEP-$|O zy~?)5Q)zJ^KHESlzk*@=js;Y-RF1t5u7eG>vQT`j8OD{Z5nY;eiZjl70lMxLtZCF1 zRBD&OetBoqILp!amhCw8jVVZvi6fD%l;9pd43or-Fh64=#3_f;+HGE}&B+7x21ruz zNy7P(8u)Sh9^5IhQ#4}G4h;FSLYM*F6*WBXVv;?b5U*;-4gc7WE`FLUM=ONFT8M97 zg2EZ67sB%=W3N;`{nfi7eapf{E3g0sd-Ri9XzUyGEgV>)40hCZot|u8y5a~5bIj_mFs(Fjw!bz(YWC4@loX4JSSgXL*iBm~k@}D*g_l zJp`?guWQ2C;;s@w6RS}<+RCKiS)j_0t?Tb z&^dD>sW>l1&E)ssVVOV`FE4TqU%v?7Zz+v5Xl1vjOQ6;jB`j4K#nJ`8;*4e;yM>v$x=|o`)t9YXmoaBbf_Kn{Ub`Z1@~wY>cj>bEAb>=a`f1 z!~Zkjqw4siG2yUTLJhoj&c^qu`F2%aZ}DK9nA(b~$)!h*nfT~4oz8W;1wwx1r#soT9ffroWn|O2h5bq@Be!;Cm>yps-t#mcUS9KnhgtEo zEbK7u*G^_JNgRIOoC7D-+u-emJ~nCQJ~HENajf1t)OaO%~{!M|X(XSE}@-mQ=- zcfBE9fjeK){*P@l6tN4n)eIeHGb8tz{P{H_#DkXRU}#n>Z5{oS*#=8va$o`&jFKdi zrwdukhBQ+6W=OKBiR@415|S8K1d1j(*uJ-wwh8b(xhoP}X?PwJzZgJyN~*YezcoEs zs*cA$Nul`dOi|*Qbd+%nWf|X((emg$7>ADR_0v=uwS7MIgvXI7JH{Wp)ef%Varj{T zLpDo0oBfT9r!{M)(Vb>FR4Euu_j+W6PWc^Xv@VgWKfjPzz#Z^6P7pfK^_0@p@V!1dy7 z)Udd>&RV~QMSjvDe=B3Yd)*1v9%4-;B}rV$jst96c>-oRbufdJH|*G>=Pa~wI6WN~ zgHt@4@bM=rj2H;JG5xDY#}&hh*H7J=@v8*ryD9lz8kk>TC1Y}UPA$W}?s$-ke2mlnN(>1(TCQ1T`k{{0O&?aSmxO$?qU1WJ#{aOqQ%K4P!HpogkOt zGbv787i*Sy(Dn)1qRLY%aZI-(9c&rH)g=z1C2c{ld&Dtz;GqrGtvSQ@P5%UL>1WaJ z_H+6nr1`Mba<32CM>MIhc`gt$ioq7>8=gFehDNTx~v%m|v z{Vcn7AN`Ioq-BTJkjDvo>b;tadnH$pk+9oNeiMSb+g^)@ESLx8z@em$o4_-=hWk_w z0Bn`UpWg%yoZbVP9#&4%B)+0vO*keEjN>z9uCc`{C&7V{4s>M39A-U00>95Nrl$%P zFnYA5*m}?zu-0(EGs+?wcvX+I`s(3O**5G}TFQlIy@en4AMt*;FD)}3O7246-F;31 zZGXO;cG@nWj!`b~TUHalWCi2Ys^P3ra4I;RI?OLwJ%c6L&7}*8>0lN4o7?qI0vDuN zFkV#~N;5aIt9%N$%J+ie;xFv8U5_~KTPdqv>wv?x+;N)!Xr{hK3;Wtepmkp}zdK;F zsQ>h8^geuyPG;ES+xI8oLiqv6+r64w)qjV5nSTOL*4uDU&vA&(}R?&y-i8=zScPRNwh<9n0>0L74=U~=e~T&7rN?8 z$VO=(KKwp|4q4WcpPd%`u7AQVC%s@@t1?)YNeJ7sa2*@d(t`JO3~1V)F8mirT+5>x zzGveWmc2PdY<;c^78NYzj^?*;spBHZCH61V?C=Av#4PS?=uuAoLm+2=uolmr8Gzi? 
zmt19XH#YV>#GtSZ6swp)S9wdCA-t1DztzXlhdR+~%MrHQek#t19w_j~*3yGBepGI7 znY~+Z0oU79p_JxwG?;UnSq%OO?w69-2%9)MHZu!{wM<5Ty;K}$6vsrFXj30FGHC&XLh(Xoc$!$v#>b`x!u4udA#vj4>VtFxLOc{onReJQ~zdUYY z+I`Ob`Wv?8KnB(CN`$}_Viu`42@gE+=vwd^Nar$4`VcE{rKaK)>zmAU z_)+FIMh0W9JO?$0K+>&qqVe}P(?P$bxXDn5&$iaag?A^=+E-O|);206m7E5(YL$4s z)dzxxCDZETYhY2O;6FZW$Te<01EwX9;EKTKdVPEr9e>)0!LO$>@LYgZ>L;P@RWA&& zeZXSEhR~vtLDWZ#&5BH-!`n{6y7{lsQS~dV_xb<5+>#j_0d{3|7TIjfz~)1TxO4Ix zlh=L2s96W!`TC&r_PK11d;wee+6+qK3n|6KACB-VsCtMoNlLt6I%f~FuSWUk=k*i< zj{EY{Xg)vZcQJ8;+F`iGFmk$~Nd588Slzo`ICpX>M0_8KCrw7eP^S#I-jTrq-1lOo zXBL_2+hgVeN!tD@nRx(EG80P53qf?}rIoeHmSJ{x2EQlJ)VTW(a;hG#Ni{ zkb#jajdA*}!OWm#HolqO&997FjLX)3fF=(`GBtD*cigjrw$)%;Z$ zbIle^zlLMtvBT(`Y=yFt9weoCfClG{wm*LFA@@K@6IUm_;5V1~v*``nL7*oy{lyi` zK4}s3ER;nbX-&?&^A*bsQL>+$twL%-pDJeRRTlcjn=YmAqEW)K;J2a!zE<0hH_Sf4 zFWq(68nXfQMjfXRwK(p_&2YAU@eFL$iKcuZN3r)qGgBF#!$1BJLn=Xsxjid0u<$ve zRiP8P%)bJtwh zC-gC$^FsL${|kceSP!eNWQe3Riuh}*YU%ss{WM~PQpCpP`X2*lOn>3PjjzS!a*eaxFkCtiJLk~-=* zBeRMP(cFx)b>o<|OcTuWx1izIX5*@}@etXXM!RO@h*TC?QqQ;d_(MjwexX}SUAQo( zJr@(nSH6)TQSd>?Js@~RiiLa&JII@SPi0E4fsOx@N!w3l)AVOy+`BnKzSp@9PCSa> z?&>Pmk9#blxckv`MD&?=4AEk@0#2~7!E-6_yFN=vm*qC@2(=xPGO&L9#cbH0_6&3N z=CQwj0OLf_RD7a>URTKBCb1(g*8I$ljaf%tSwm`1$Eee_ZS5qf*vhS&u!}dNcosNV z=-ma6#hEJUIQ*9suFNvSapS6~HKuktWO0xnEDK0@X-&;#{g&`$Qf!!hYY`~!SJ`kwhOm_1p=5TWdD*nCCm1^u}FAi@- zpYCqHEPM;>Rt|&R5qojbmyNjnsgg)dn00hKior=W|Cp2YO6tEZgBD$j*}c4Vm^mPp zo6@I%dycM0n>u-Rv1d1w4U8b$jt=;Ck;hEYao*T*6ldG9nHy;p%P%gMq7x!5@>KA| zU?ssPn!FCp6V>qSxorNlD3bpCxWZgy&*4HFH8wz7fl_SFVAO9nj2py*tzix9&Xz{I zvNp=?)1ixs%`ht|TXf^Lh%@(HLyGs}(5FQ348KYQ+vt4c*DvAi=j6~#eR-xT{e^^Q znLM^Qm=|Bfa4J;R5U89EX%2G7Qb_yz-v!wSea;Tpdikijo zbhscJY6pdh&Rg8(KTIj0%Qxmz<$b|}X1I*(2U$ZpugY!DQKNw`zVbuYWn%7#(`?Cz zw_wq(OR*NIweNy8>W}|g#Vp>2P{#`cdS4WW1E(B<-v(X~`*$HVy)g$hVKKZQtO~c; z4P+m-Mxo8~0_ty8LQ&y3G827bbKh4}mau0#Yxz)|@vlR4V*FC-NRh`0YdN;wQwcY1 zE`V?~MUWU50O?f=@Kr^VD8+CuJa}xy$($2%ABNW4+p0KPsFQ?O1&&Z>lLj01WIdgK z(!;NMFrS60EWl;_1nQMb!`Yux*}|Luk(BgQoYfOUzOmzQknd=kQag(b#IwM}{WQ+m zCyP79ZLGM%4Wu>n@wA%(#dxKni(wju2P$)ueFt+tK334Us2(WvJ42;kkHD?Ia-5c3 zfExEJNThlP!*sr|5#v|mk(8CF5xp0Gn9re*53%CGXNTg!h`SiHbQE+d33)U1Jko88 zfxQh<)aJi|H@cKfi{B=qQO|PxSbmPiSxbX#K`tB9cZXh=^s#43R+yh+i$Q6+Y}boJ zWR^ODd$&Zm6V?CZpH6J!>hiNiD`slql7cD-&E7+O&e^E9en0574Pd3WvbZTe%eh}q z^;vYV7jD^FBZ8Avf}?gXi(0B%KWso2U0zuMx#m0Y=JZK4OR)*`UO1!t>0(UZJ(lVG z=D_B{3~FedaCqoMU-5LB}XbsE;=I*(}b_s*iXCuY!+*2Qro zOYP6k@%I&lz3MPSda!gi+|gIUiwzvk8a|Fc`+P@Ty;l?#<*CzP2?@%Fgbb{2< zIV{QdMMaav?305QEp~XpKakC%RJC1HxbHXEL`IPJ-A^njI+@>~-NxTj_yJP`N8l7k z>3ZkXll13R8+-rHl2R`i2zUY=ZzH9zwgRSam%Gy zIX&>x^co$bYtH37G=QFIrx~qt9LYM!^TzgjqV$L}ec1Mh)cn^^XO0@?UBw58p5-;# zcT<3SW=#Q&-jj4?+iRj}I2jkp#&FUdB3Pr$kcP2`@QF$!#2V%Bvzt>i@~;+=y%Ubt zt|f8%Z`(r=-`CW9uMJh<-?5wrAg13Kprfj{a&In|&<}S{lQkDBnS1i{!MuAdgjtP& z8QVOeVS6MLO!nhca3S3Do{O>*&(MHtD~Mk16^MD+MII#C!_+t@@L6cYNY%B_(1T;q zV{{R=8rSojokP@5yo^GlADPQ+CM$m_Q}JdmC~RH>k8XZ)m|SLrrOsCHqtTHEBk}C> z3Av4vI$x4Xg+5f~d)pd%SLh7h)9vQ>8kZ#clA@KrImbE95D-06v=nYGA(fdbUIANuPxh1shCFvqsv21(wPD`S|pJ}i{I|08NK@qL&jsqV#eaI=iLeK7Zq9>MQ(@lw^sP_HsfO6S%x$8gTd~!KF?XDrW zI^((HoFguk{7FTkp3>j_d@r*229^YWJi^oo2p8Fc+Z=4NO>&^!-J_g+dh zl;WY{Wg;5hv?BlI&O=S+9Lf!w(#I}U2+O#Iha-XRk}3^NyKt<;PqzwVDGPkLBnWyjL&tNC=d&Md5JD&XQ{#ZcDS5eDTa zflRO-t#Va@;Y%5~ETfQFW@$@b?p*?+7K&i@bu96I?MlC#`$Z1zh$neBYaLV%RKWhJ z>C6~|UBur)4X+0{V~hL;^2}Mp@ri~fE$W^FGaRC@JjV?@kMAW$zXyo?0B~u1uCdsv z0>_5l;Z&855UsKv5DL3#AQ(5*UbVvJf|=y()pp!{P7R-q`Q&izPI}`al!L%zaq>d0 zgVrbOVL_cHB>3;b`W$O|C7Y7xpAzv!%PgGLRz%%-&c_0&SRCD@55Y$7xnREw+#xc7 z9IaSD(s|zqYZX8uA6_HM0y!-GEDG%2PG<0ZA(#vf+G~6m$5rwy+J`2GiNdrl&hqsw 
z>a7(HTI1K#1EJxNs-;ISuTsDbb$0l1?;%_+{++hX*TS7*Rm??K9sDF)hK4f|iRJA< zPONzX)A}}^>c~%{=OPuL#C8jOCBx)pzd0;fW(aHF^uW2cJggkQ2*%h;Q%Uz@7`)#e zytn$ouTKr6jn6%ex-$)Lh>Bp}23ee5iCOL*`3I{IlQ+xq0&<|9y^gWvfrZqows!!D}a6`NuyCyf{gg zi^@4VjLRmqzyE`}7;|E2rbAon%|RplKjOP#5{g&Gl6mDpC_G_F`x3X%5$%W6DMUUl41@X<5|^XrztnK>m}iUr4Cu4L5cl|U-YZuaeOg59C!8Efo*s$r04dMD~)En z7pIx$=q~{+=^Hq<_KL&7J%ns7SD}~R8-vTo7swr)4z{YV>EMY}Vlv+f5-fA5%T;ae z+{a6_GB&$mH(Hqfwad+>1 zGGg(YI4|)5ov<;O@cuj#<2RkGJCcD57O0X#ms7B+^BMV4KNe$mS%OsBbeN+%Ox}m* zbDwSc=m*S!rgAA5cj6*OX1$<2f5qtjk9Vn}TnzZ{o`q)nC!n5e5~m-slTJD1LdJD2 zfb3Z(@!s$g(k0zWcK)=Q$x4>48UaWOZN-^j^GAlLirH=9WT{d@ud+r;&baR>TRTo{-B= zW|0mLKk6yYGeysj!jjxYj8B~nYPnr@m|-meF=Mr$HXsRQ*1uEwWT62!mP)u_Sw>`ZcLMLQ=*PKV8Z2eF{K3u8W{&|v=Do2#`C z^(Jw2%NuEox>E|zyNamcr40Jyu{O`OIY6^smC}<2GjXncFMY!Exg_0I5vRg<(pNhL z?t2$fu?uk!f95Dy`M%@E>O_Ofn-pT-Bjk*iAL9MdKWMyi9FzC_GL1Y|!KhpRX&ifY z4M?a6NoC(kYWKd8P7bWVS(7V3Z22)1$v6!)|FX#)sk5k~Bu^et8Spe9ocu>S@%%i?=wL!Qum%$Mu^ z&F|Tj7L)2`BkZzv<6RL^Bs}U2biJ#AL90Z{eX)SDqwQn{_mwJ1O+wGOyYzF3EL`_d zAluAKn3!)7@N3p)`e(wBL(nv98kL_6c0Z01*MfLD_&S#8Hs+EM=OXGqe=dLR>cV)b zaQMnfgP^dG9`Go3u+TJuam$9ut_e@+Q7sctyWkErhx6!z5G9Ctcb*uzH9_5wE%oji zO@6FC!%T87BKHk6Y27?&dYeB7rxa&{rxm|jyZM#c+HC}x*YQlrtOgprQVZMR2sNKm z#9dt^4JJ39a$`Tp;_xhSR6F^cgsqlBje@zHs?T3Kv40e{YKUT!nIW$IxR~Fai$jOT zKVl*x4HcGm$m8w%3I401s$uf*E-VW?*GM~-?-p=}U!-v%OAnBB2O_Dcln*W1Qp4EY zn~o$Y2Cro{kf0suuyxEdfJey=pRMglMx8U3epIAi^^Cyai2$tDx{=F)>BP5Qm%09T zDjc-_!Z@Cr!gB?p=!lj8ueGM|nVV~LXssN*Q;>mswr{`#f^B5Xc5(2IN}*X7r()FS zg%~>3itkfB}%1!XWI)#k)(xcL; zQV`=+O~>bHF`Jym zGSDq91Q!HMAp?2=(7p*cyNn7vcV#}ca0sS5D^8He=wR@jyas-^#89_t2RzcU5C)V3 zVL`e9&R+Hdic8N!JyQ;HJN6No@zcn3I*ulHyrO3Rr8bI+S<=99qtU)`KF+^`Bzwsv z+G?^A2VdFZu!j|4yBlG+EDUb`cbxwZK0~MTys4h(0V1lJj7{QOc>qE08n(BOFJ z;ge4({d*zoIU!^A6i_$Qqm57(OSGg7kDd(^-%elPu=k!m=Mz)y1PxH??>YlpdRXK=54D;e&RqM`3p znZK5^k$7g)n4UheA$Th0D7>N{91e5mtZowDlzeKM`IKZ_bHoL2rlY5PAY9TtO=zn> zY~Z{bj0{pSHSY@2IAgG1(TWXk#UQRd8$ViWkwvRsGP{QtGut*q!rkUgocRfRF51Z& zvu(%WpH1p$?Nfk@3XWpI-y=j%Ud-`>aTE5xl_o;3pLk)g06%B#!UqZ}blD|2EVPva zk)7T|r(q)I^yEkg=7FdmyXrQ)4U520t~Kdx+)J<-_B@3FpCkW+T;X!X6$05Lodf&vTN z{{0}19k7C@%LnM}!J{O)%Zp6X6^9R&(ipLAH@Cj5m&{X+CC#PbJbyDD8k;GJ*)W$% zs_&(*9dDC4Y9?g#&LM6UpFiB@u@ANe2E+Z|(Wrf`7+TsLF>hfl#HNU2;gY3q9eD{s)$b|sfUV(9Q^NtkOlfFJnT&)%jq$|G*+&Kc|ALilSE@WYUPf29xQtSFu; zY9~e;V$r2{A)eH`P0g!zk;%n>sBBOM-fWwS=R!N}w-mo7Tw)NZv_6c7-nWrV#dDar z69snk1DGH>u1)+ROJ60skQ_d>;{{JvgLQy6G`aJL-3@EI8QS~oa;ax;pm zp5l&d<#!7ARfyg1d#LMc3UBTT;h%;W#N3bK-KjMW^Vbz%;o=e+F0~LZ2_+g+#=Rm# z6?aIOY!~A_`6BO+X`yC1r^)ua?oiG1yc#_Mh%iSJrby?~yhemkscz`ED-U!!K5$oz zvtWh0Gc$GbdbpqD0PQ88>DTF7q$*%_IjX>t}lmju$K zJFbG~nKbO>_ju;Zg+$d%kCq=+fM}^SSZ8?k}V28c!`Qis9-1K9LQgLp1s4Ebi7FYcjA_rYTB!BW#LjBW^4Gs3Ol( z7s!4kFPq(Ig47V>lQte+A6X4Eug!&1qpeWdKL+KtKj0L_lTm8yL^M@ehe1YW@Y?qv zE}pJ}iW5pmxtbx$Or8k)19k9WMl*T3b0ccaolX1p6hTK$2iO`NqGMyTiQK?e9On1t znnoW;j+~TZ-nSfTZ_e)xXPm)o1@Wd6w|;?lUI!HAhe21>0UXXcg@z5{j(q}SOpBZY z1NfU{F4_ea_2tyj!Wpl>;IFk5q%H|z{Jr%c8eQ6o+gCIZ!QFYU*HT5do*BMD$l?ozlejTBJNl(3;r|D*mF*b z4%Ma#sGO5b_45C4X9xSplm2U5*dIv3tv@(=NWnV20)Cs;!@1PwToYq-qBz2 zv9$?_cr?1nOz4EiCUp^kY#MX2TNz8Mqxe-oDwFa0HPab#5O?~Q&_GW^Mp<+i9G`nJ z^)9M7rC}C%c>IO)hUt{qGL0uYExCvpg_S?q*~E(j1z#{Gr32kGbH`Vv8yr z`|)L(8rFOAJL+XgbVcV@kV=0_**!9FJ~|1ncIn{ojLn3Pzz|U3vzH}ZRI1q?JX}Pm zPT+j_acU#Ip`{Cd3==R#Zx;O&n}nU?jL|zFf{ge5K*j6bA-gM+ULlm8I$J~+{D>sC zbw0teJ=w6?FaqD(USKryi-^0d52njbMw~qX{I8_Zki)aE_^umic%g=e*W2*j`tf`v zxDLmSKT8g*Pl4WQBcdl(Ol=Q8r*3iI>D!+Vn5?#R_*JI_nWlD3#pn)v)m_cR->g8T zA*89_%%FDR5ft5C$lq7LqR3`(7!*H2CMfKr*&nm1(#y5@+fN!N?jg#n}GN~q& zZ-7(M_b*JfM<72BnTE{YW#=e!wo 
zXU5RcSF`YBPbM|p;!8)1??>IENM2kFN3UziP^VLlc;PoS=lN7E`wHm67n|w94l%N# z&z-Ez;+=V0f}!P`Ic9D<4eO$MxY-Z;93I#dLcys(+Iw0OC%+YKnwgu8yEwq0Jf^9Z zpY#59P$d3gA@I58If=KIW|mleqf#B_^+uue%+W&t1#! z>FRNj|5)&u=*@dFrNL`=Fi0vDa+ZlF(Tk*jo@*?V&brg1I&nBlq7at6^P&^QV{!Eb zJ8D}h2e(qAncFk|<4S-0VIK4>hNgxDVm3h^np@V>x7TBcL3;>vnT_W=pq=#F9U1J9 z5~B106%eV&Ma!82dOg0L+g@BnGWD);n(kgO{p>E(uxg>@M?1;DBp0In*BR$#8ABT6 zLh7U{F5e=Y?D5`2$^|EIt+)j_G`|*vX}l*@N(Lk}E>Z&;g>46qa~C(fVd~5D$U7Bl zysjRCUvr)lUB(y-N({;CHJ$L;Sq+Sfz7o4m8*a_EC=y>;Ngk|R0G{gk=-C+smjb$p z(%2L-?r}KUI#ob@SO!`DbQE3?(Sto|!48#2Yv^pnrw(cElGv9#OrD*;&jj?AqM1!7 zeKkkJai*Iu*?F@Xv@Z^3Kc7VXs%l8WS}XE3z`n6* zS~1REyb}C-o{;U9Wz_3}3G5GAOR(`g`Mzu^#;pO+S+|k2WJgo0o%%Fv+*h*a{b~3) zu!woR!wyvV@=0dU!}uy_2BzsH&B3nQt)vM$P6dKtS~ijo8LpL_`LD58S7|N z6iXM<9Kbv;z}(^pbyz^Ep?gi;cZ&u5y{& zLb&36gYKV^&&;u$gVjlg$^5I*u+BIY6~5+^@27dEO_eUK|GJ*|i=^UMPgg3h`;L5T zjb)aUh+%MoDv@}S!tLDQ4_ov{h|K;VhXZPe22WIQoZ@!8lU`0&V*pWH<xuV?bM)Jm2f&Alh-V#?M zv24smZd&$oY<)c$=te)hnD&nR&G4r`FP1gV;m=k}mZxA`(_*Zg|CUSNp^t0Lo#_$H z4VWn3MbB+3qIQVV?yq?0~ZbkyaWm}1oTLji@Ddkcn)Wsf*4w#b3LcMg1S(+EHHYJ$hl2>L^-7Zju&>Ray| zg^W+17}HE?T&2Dg2Wqt8sh0^I@pGVSY@=w<IU?(q5e<7s5cj1M&EhBD=5A7LhFxKb0z*WB?OO*~^J z0ZSHMqR%(oz?;uZsF%45u6-DQ&Hayw!C{KehCV?{Rvgi2Hl~*Ga<nz4iB!xdU#P`@oHy%bJEseo?Vx?Z%N{7JNQrZ5wo@T2&C>QB&+kiFfBJPohB)Bu}xC*ijIO1S>?cVo%De!3!A9A{mffjy5L zKuOzzsrmWD;f!A}j;ykwe<2cQS*D``IZykg+VP{qOrpCu9a5f8#CE@AS}>Q-G)NiX z4e5At^=LXx=D)K8am8XOGZb}N0Q@!|&YpCkac63weqAn+KfQ$Cu{pp}CqC0**+HLs zKd1jaZ{ug{sZ2{^I(cO}M5q7y3^5CC@tn8AIOFk6I(2>_=`hWsQ`aj2yG#+)&Tph+ z52|2ntS{Q{Yo_qc%o-%$&Rf9)p0lX-@HyVk zv?l{1m6(5TJ{o-SfXh){Wa&(qrr>ThtWyhv!BPciyZDB7-HFA)Mdxt(2Pq7-PXie~ z7gp+~%kO-4Dqqq$@ue{J_VKOq5y=G@r` z8LDxi8MYq)=>0VcIyMR6o?c$a2u6?y$LpE$3d zLW^5gk}>LAiNdM5O>ATRy81Rgv|NNI=?NW2%o#M{3Y@0~PIe?emD`}R5Ikv{krA~^CM8@td zZM7T2z1xsWG#=Z*)-}!e!zmvw&&tK=DZV)B&REnpcnWdzUZIVtFT_MHC(A#!Q4wWx z*xXWuKg!F|W$#7Q?b=Vjc_%Sxi6XG0R*$I0-=aDbsO-soqK6y`G8530Wl5AObd>^lof-u)@}`6>w?nf4K3L9Ul5w1=Yx}P0y@gjOz~L z#dXVZ|6Uttyd+AecCEwbzk7(noLp)U*h_s1Cg8ESFZ9z)6WA@Eg*TQq;o{ZHaaOt~ z`RLkCfBmTf*XKM-|KmM+t^F*~**FQiJx(!Ol4YpZQ7iiF+H5phuZoA-(%{(6Poz&_ zKaBb#NG!m&RqFkE9 zM0hb_9z0uOMViW0@X0lQZurzOs%LBtebSdnc0nm^+~h}AUf9m;`WHhoKSYop{;`dc z!yeRSfd#eEsU&G9eshOLJtqSNdx(Ky9CR=GOCP+KfQ}w%d@hWDslPqYI60H9oqLRY zvp)?bn|8s>yzB66KpvWhM#G~=LeRQo30fh3u%tZ{ZZ3($^A?5FgZCM%?->nGBTmvf zk3@RqP4YUY9!_gsV8Scg$$j5WvRqxC?gRt-9a`aZ)(J@}SMJGe?OlXo zWfTYOPH~c61Eh751C+$iK{xgWJ1$0FtnD&&7VZ`ydt1h_bD@;XN%nSPDQf*4>{u%fdzR5d`{Sl z%EtPT>1S4vCy$iLw-b#_Ono``{qQ&1YvM{*oZP{fCb}>)^;J0wnY%>e6NSH{pU^q& z6jDzp+x>|K^8an7sqben>XTE*d$Ic@r$q!^E%UMBSu{+18U}aOr;!Uguo_YIiMo4o(}HUd!{GN}j>7 z$;N1NeLo$1BZ|Yjo=}CQ(Zu`55~8Cx1+OJ%lcwF#%*(wI*s_Rc@ZH!$6~0??l43__ z)1--TkDmoz?bK!xXFq4OJGAhIy*v(7zo2;@JMc_XC24paPtyKK(ysY(biaBKPWur? 
zzD=`)(HXPJgk>dgcW@f~yLy)@>lI>RHs8mpuS2cXg(Uk@Bs1B33$B_v0os^uFlanY z$9Hv+6^}UlZ1sqrqeejZOQOSmW zwm!?SDo=^6ToD=FaGMmribadXC9tmOJ85*DNr#$v&&_yI>Uq@=8?<=8l;Hob+d*^f z4%g+`jq9+r+s`E0Ws$54U2P6^(F<4ECBF))Cg>`iwee~+mMr!MxoE6%MJ6P87`RH2H z?zOg;jEwEta`f$@mKNH+Pb;)F&zGuWnhw-SY3FhCa=)&9RwmcU7gbcKCR*?VSf@cE4@AY@1>v>O9-SYIQkDJMW(-Z3iwNul->@ zsjlj0l1*7QW2<3sy-w99t9H>>H9Hi4Q3Daib|FjV*gD;OT_=$rYxn-Li|t0^akWdf z>RiwZnQB}2{fcd`$p3%G<$K2qKk7VYFWmj&^v7fwYc@uXm6DQVL)(>E+q>7aP&Y>f0NuW3T-+U!@-be&H<8P6$Pu7Y3UE#<66W3xb|5&nD zRGZkKs`)ck-5+89otF|W6j{Mq-U=5=@^((Y>_n$e+8)9+`kJiYc^_dRdy*|1>~gxe z;1fGCJc->NF^<*V%Ca%ho$OiDgHCzB%-9$=ZPsj(G`mH63>)ZukiEJ{AS^BS5XNWs zJDn+=$bM_RE zJ>w-MOmAk{UyHx7kUvgXfBh~iC#NjD8>TK?Tr3ni?Ix_I>jSo}eGfa{PFwilxhDID zDQ0i)z2YRny>#kTSi*LV`{}fHY^kvJt0DU?cqcnzvyN@gJ-{9`En%6}bA>SqKUk-x zXKej=dEtbo^-j}$OPo}!W(uc=*$OjySF#`#$QHYVvIg3|Ea}_8j#*I3UV78cE?)Z6 zN&etQc4|Zvdv$Sx)4Lx>Sm}ic?9NPdQZt(>eCO1}o^Bs@a<*J5bUD$;e!17qO195o zU$5|D%L1*1`&BL3XHyQdsiRX_xf95CuqT8+XAQDjlgn6lPhEDP$e*pz@nE0zd|(Y) z4+>qiwAqz2!r3_jd)8-Us?c`ik(1`)N9?Uv(yYm+$-)X76Je9o3gJ2|aytGefi+p1 z%_gddu|H&#*mB)Lrz01Zvp!cw*z6s?Y)3%{J9DfyYv1S3?w1Z@>*nlc|1G$|9{O*g z&{VsCEl+P_EzO+RCXYW(yWWPf37%#`N2z;G7r&LUadzj~A4{9rt1I2u+_SF29jX%wZ}*38t{fy=$y?K)T|UzWig>#uPgf}WE?y0^Hi3GePpX^s@ZXyo!EAB zY1XWI6Fane5v!d1*@?VIR$oY2gD>X7Iexlqw1&HIhTsUh@?i-3aCQcpl=0JPo=Pjb z+C)h>eTTF#DmH>m>Da+O`&Z?eJ6Id{4AxLxTR3x5A^RyHls)^}Pk6}WCOgSd zldV`1&01#$u+E7_?2DYKLTLqS;p09%_U!tVti;(R?CIUXtkgLr;ku?-tcZOO`@(F{ zX`o2Rj(ibeQ=EfYP;6zD8VA|Wd3>DXo0>4a{v#Va={s9F%S327rI$^}2oU=3j$?m5 zzs4S#7tN+-N(tj$tq>YawP(lw*X^Y8QAFszA%b18I-I@yK!cr7%(6M$Zq~{6q*G#N ztkAE3jD0{`eXrTsX&= zy?tl15bshp_3mk5<}D}oUb3+;+c%okbCDH(pSG7Bnkx`u@@AnwBgI~@Qf5`Ocd_?` z>cV;1d93v4QEX7+U-pMu3wyEd3)`-}p54_jN?3c_ll}fVL%82zJ!|Vx%}$Yh%}PI! zXKP(G*#O%rcC(hL@Ta>O>)9{Kj#}Qrj?{UueVf;_h7XsqyQ7-eAJ=EFD*{Y~`JgEr z{i28!$)C=ef1bd)H1rEs85Xk#?^v>~-Oj=iEpZ`^b`Ty35we#)XbMNTY}U=OO?YzC zCU%-=3!C-uK70I!E;~cA->Kecv2gEZ%1U+raZ-*D7p`dj#a^?TC%nJhNVx6TWFd3M zhV{!?&QAL(B7EB4$jWV??CzZ>*y%AF*oxTIY>~UTFl7A=c5D?0y~^d;GuPj;>L(P1 zSLcheTu=l%_-!T2#M`mUE}OFM({+SJm%7>6yO*%_5ozqdoylxMxP-95kH6N{XS2WF zS+L^6Mr_%`3igA1oKW}MANHP!Im=#eU>{r#W^cTkAyl-|VTHnI_Ck-0aHw+^d&Sp- zeUfmM?euvrbaO8fZXBJ)lJJFW|D^$^H|0ZYOYuZ@cW^1YMtd_mZk?&nCSyH2x!0Wy zU6{+p=sU2fx(kJ!^`@+?m7g$t?NOHOHWLm$f8@0BmIZq+YcrdqHiq4yUdZ14)z2O& zJH|3ARfKx-sqCEIOtyFVAp68Oh)sVqL0D+LNoc)Kgxy%J${xMY%leQ2-`l*W0f@H*}q}Vtk;pBtZ#xoJK@!Lr&;U-Hc-k&XppNd z^f-B$?Q~zs+JCKJ)&BComBM6UuhC)lOKmtiWMeASF4bXceP*-%4}Y?H8$YnGj!Ux< zp~k}KQMT;QqOn4Sbq2zTC-vFB`%{G-vbOBJKM}$oDsil%(@dddgOFX8RxCW3dO-N# z<}atQD#B(P0hitL)4 zmrhe^Zn3G))!4XaYlY!4vssTpYqrzH$)&fY9$i3MO+Xz zw_QZgc2O*>=(M_)0B>@ zhxyxVXsPNpGxv1PPxku>=|?JfPdtDQU#v!%IbJGouY_u1b%qC`W> zCNUrUMpG%*Y(E zxQaXQ+^`w4zr>TSWLLVT?;IE0BujlC?Bl&!Da=ZdnN%ewlXPPZm1sCb_8*hQ$KCH3 z5uqBhU8S4WOijmK{jaIY@yoPvsE;l&p1{v;rsK0K^-MrydZSlNG#&5CvrMnZ(P8;J z+|?^)u;;BZXxH0=)@32Q?dl{-*LC?`tQYt)WWj+^+zBUsKXf4lLsG^x9q5U~phN$t zmA(@`@wC9RN|AJ$qZq3C1>g+M7M|R7A&J#I^T;HG__~ULaotfU+!qCvd-ch#`Ve@X zd=WV1pCn><594<>i!A7WLB{oYfWufB)Z+cy%l)I#$#3|%TSKOD9tr*> z4LaVwFfw2c+fR+B!D)@$=bOjrvyp19At05k4YT3)tfN4C9+~FY$#fL#Cs+P^M4StE zkp3BQG~wVfA`HGsnm2`!X3cqYYr}8Z0rQH4c{?eMS{#Bv9qirMT6v1_QWP z_)p;^s>X-oXPF^RB`1S>@pC*mb$lN2HS?jjqA$_HWgnUDl2J5LT9cIBl*h#WK0LMc z5Iw%77_se~=pgc)&bO^P?4{tP4B5a`;ZovK7RWpuR;7QPBZ*5rM>lRa z1Fe`Tj!i0j7Cp|8{*F+h_I1S$wFkB_7iumxT=WIbru#jgliyC;-V@s1;X-XP7IHeC zXNXy97=Ajwg0f`7HTcn2@h37!ESRZX*pLz?VV4fcj!jkxM`1r-tJ#? 
[base85-encoded git binary-patch payload omitted — no recoverable text content]
zwNNG=4&R?ylD*Aegz)PcpPNbSkIdj()fUol3wi2g{RX|% z5T0*mdsE+D3jyf^h`nl**`lwqJmIU))5^JL;o{_VWck}EY(G7kcs!RS2W}8g zSB)+g^oY4(FHYU>!{&jhXqon1_>p!2297umWPBsu_IN6>_gMsAf2t9G)o@rBe-0f* z4U%eoIXZjqVp0*5gBD$f@!Ht?!qe>qVxejU_=)!L@;HNT+3|)rCA-li`T1nSS>|2!@Fe?XlE|!hDd?PV z3))my<6XrC;3=O*C%txog0n&N)Lk2rd}=rao0X(#rxmq5S&A+58*oGAL^NWamv_4C z^)&c9Io7)jJ|}(P&E&_D6512768`R6 zLO#hifW^5GI=A5nJ{)67-Cb^A|0z9E9XpNK#mM2cZ_Gh^Y#q_hd&2Mi`vZ0Kwv)?Q zZSZx4K3@^zNlm=`AoBvtqrEzb9W}@Bv~LEOc`qU=$7O_8+d!;}wISWn!^!o^e5ho6 zv5|4f!tI+w1^dwPw5cfwg4zBRm-40I*M0&s!q8ph{JL;49;m2%gNqr2xGDZxSY-RX2)QiII5=dGc5BFdEfh_$Z zG&p2U;G}e&*VQTHj_UW?z)ek%bT*JE#~mkyle>iDZ1;KIlFzt&>ps}>_~K9k^l^h11E za)GA3ZWUy;kE4}u7?f?900*z103V}c{E*R%dAGJv(mUrgZn_vu7Ye%(bXSveV^d&% zVj5o@Pym000MeZA22B+aU~xzrugo)t=)S9vGJY%>+b_j3v8n<+smfQqoJ1s6?%495 z2?;iO1?t1TL*t$tSln(;cT~vJZTn;4vb8CF)v*>2$Th+{G0WC>T%yzRUx9zc6;xQS z$j@NeaDxHIaHdxc?D;l`6kFN|i|o(SfV#zSR2+ygACn>Q{RCK@d>D0=_QHn}IbyhV z67Bb$MzTK6#YaJY^mVBn>zD5qR`Z83b>?sWhIOoFnAtQy<lsn@OV9NS?)&c5ez<;# z7K0gFdz*qqXU@Ro+Iqfv%^9rbjOfXv9vq)jBm~=?f{d6Rkl(wJ?yH%EpFU5cM<$w~ zzANJf=tyDSqf7j9gO`wg^BXLk;0=?{%0k$^qxj$MSZqu=$1fRvlD@83MM{P=!wW4! z8oy45p6}Y^=a8`wFCM~nAPec9X|mKK<0?4sF`ymXW|9<`5A#d4=&GEH%)6BcLkqUy zyG^0wCtHF{RpUr2sMC?$3E^1H0P5Lw84fn|Le9ctRB`@Vawxu#KN)p^Cdm$@hFl|A z5u{B&mM}iZ7M4#4Ql+=XbwPAl1%hBe9sgYuPnzH}m^ zLs)-v6ymcR=Cq0J>bl=i6TMwng?qgJfnU0qruzG0!gxJ0xlW26mMx)=zmLEp^glQ- zQH*vWUGQbj5VFN`A=oU^A|W(}I4@~|=W8aw%MmVQW7$Q~))kX5?9F=q=30)FubWAO zzpW%!XEno?1=8peF-3IhS_&MCkRx&%W?|?W#?4q@#$Q@b_!aKs7~>$6$nAOuIpNnN z{p(Jl)s_eNV%TANa>ihoW{^xJPlwTq|E%$}?N|I~JAi7|>(SRf8Tc@=6*h0TBZ8p= zTsxkH#P>HAyLIqif1QP!E9X+r>r=?teR{<8p*~K0=TEX<9f7{cAm}N7D9m{LOK^$P zArls55JTlen)PWC-6TH>yB;6Lj(!{J(b6rP4>?J7E055BrR(4_o9lAg%st=5iTxRS zQ+kB7*d~z8V!Ncpd)W1V@MAXje9OG`)iUC)*&Mg-1DmI^`D!G~m_21#GFeB~CSY#* zEk~JaIzvhvJyl8^@|rp7bD2x}pfuMxh0T{4i?gs?inI1+F3S`d@vl`JS3gFI`^nya zMlxgCu|Id}VfTD}PVAY(X1rz0JN-h6OU`6WP+2K1%byd!V7sGwzc{hwW{%5Ck>URO zN^y!SWW|H3IPvnuoH*1(id&$VxHqlSx;ix zBN_3CDV&&jmpLIunp0jV#T{Tjv#?){M8U`czi<`wj7=1{pGmzxG5VF_2A8dXqFCE?aHVB+UL>fsfutm;y1oq z^#xv~2UD%1DkLg>C0?~m!_60Bguf=exO(zMGRk%du@A_G{q_RQ+80S)4_QDO4pxEk zSc&M(cRM)V+#?FBJx12XzNSBvGliw*b^OO<7m#o9!z;6Hku0wZaOr#(-_P<5<2sb7 zW72NEcxDqsFb>SBt?)}c#%H5(n!Oiq)6ZQVmhzalH@Y~%yOwL^2v%L zKN`oAhz)l^ag02@2<|jVdnK_sDxxu+GZf_?~Sb|Urhg9*324SQbd(ZV;T7(ULBF4;Dc?EI=tT5me= z*N(}MXUr{SWqlXCH+a(+6={-Sqe6%L(gas`IcaC!F5o+{s7(>6$k-SCo*pp0rkouoykv9Ge_qXqW_1)Q!JM}PF zA5np))v+k^WCz;$n~=S)hCr%*J$bXwopkThgCFMu={a%&=54%B+WQCL-mxm8d>I)Sl5$P*T)PV7!`Bl3OF_c%PYJ>fvj#M2SHY#fh7qquE%MxQ zh@_VFzl=xP(UC`YpwPbn_No1a^vLm$tfoi@`UK!|>s>fbyn@{Y;>dYF2QoKbgKP`c z28(Pb9O|P?k2)Aj3WX)mtWb|CcE z$6pSd5)5 zVh-6V{;bz5Q9su#G-bQPuM9(CDtaL7^BF>~?cacAO84Q}=K_q2y23y9m?E0cHIYns zYEF{PvY>Ic3{@Y>Jn!3O`NvcIAg*R9$X*b@IduedTb-cq2F~L%dlDd|It6xb)1Xgc z%J|B-4P;B-KX`t%)FI=N9B-9g3-iNbVMkme^C$m>;Y-ue)_5g(T;s!>cI9xqa|4|g zd58I7x`l|8MPwm;3qxj~;rDMcCS?r za^^vJytxPxrd@{JjjJTLw@%`hp3TLg!nq`6B)fN?Rss#bcv85p4*&jR``m&BSVtPb ztUC!%Sv&_1TKt68&z^MoqC&b;f01P1fw}1VA&b9Se*h)-7m)f1v*;1F`=mYF9Gz_w zghi~LuPUKSdpI~s{tDxqPU5$5_RxH41-jE% z?Cr~;r)@L^kNdf-=U^#VEL0~iLLKREc`KF$y9kvZD=~SbI?cN2ff$fUf zw07EFQP=`C>fo}SU$6H7;7t12aS6?5W*e=M8La^T7AEGEw{KAQ2_DhR%4W-22CQFM?7P0g5 zf;4w4O-4Lt2Pc02oD+-LSIzvdg`NK# zn?3B*l@ecO=e-W&W_&or@>rqLTsd=Dci2ja(;1`U?IUSU+p`p-Kg>tdKmebO^K0+S zA4k-~e()JpN@Qd0YvCxni}4SGfUbW5)x-KRZ0HAESkZv{l27xSbFdZtQKUi>iX4SN1r<`bS`(kjE+Q{UF@$>m6x6je zh~pDO!F)+Lv2t(0ME5SWD4jHqmg9wxJQNkxf#um`Y8bwdYGl1->>XJW zTQQ23$}fQ%M2m(cMA2~B6Yx)4lirDyBTHE(rBf>#pH7vcVv90NX*dOAGw$&tTFmk2 z$2N@W+l5#4;;<{G6K=Lx(=9Hu;P~G$1KZ?Col1vj?18m4 
zk7OLk$PI1S&GseUGu}z#WKOKkm@2!KrMOG11`cL(w}lO?rd+~!#~;|-=>aEhpDQi) zmyr_N#4?XMbGdtVGk3eR6c^vd*fGNycixQUC^t%ri7A^;d2rn5XqKmB44Hh!Ou0LN zj(cm$o*lngb}?O+yA&-ezBz+sH!G#Yiq)L>|0p`|xSswmj^9s%rVv;r8X3M-xASRY$5z?R&G$K*WcGxUu&e2-YnOaJcS?4 zz1zZvU-a0t6_xs`I3r}*t0YPhpCfYUeftO#`Z=_O3I0(;u2jTHA^q_E2uq6gU z-zl!Qa@3NFZ%XP(9^Yw3m<=s4$Pl?6+rjZ`ERK#Fz%grMd1cOS@C@|kG5Q7c!y#8u zwSEID7PW(n4}0lz;S){|x5LF3KGMQ%w;?$56B##ml85YAk8azRC^ZW*Vcwe(S!3B4 zT5n#!)35L5(`7wN;js_KlYYbDG|)L6gPY27j_}`D z`@1n7OYg!>)?`y+l$~ZujSqKPGMjdL1Yy99ljJwhnP+sEr>GrT<84Lu|W)X}|;p>eAqX-zzCeK18Va_3Oy5y%*y%y>*xu z*Ac_t?4T2?*9cBLozi-z!&|F#<<9F)-1dtl*F2a4LG6qMqY=)jMLjU1>mkxO?xy~i zjw(6@zT{uQ@ZF^q)b|>}dXF<9n1GtvC-K#Vi)m)J!<1yZmu$;Tie}pP!rQ5hd16K| zCBB{q{YO@UQF{%}9a&9hHdMf*J=ZmM&Ia_^w=-Jlxhs1o7b=bZ7KywJD-8LtgZtMy zvEPc9)Xp>vo~?<-l9KjldHR@AdAS_AEg!)FGu9|?eZ?A9B=RVFX{p~EX-StGtJH$} zk#350ci?@I10l|b`^4C}EZ*fRaW)z3$yKfj?ZkvJ3YchON7 z+|gG5eJ;ke7sISX@5Fr4_n1oh6Qz=Jgw zINOqrG*VpWT`fsPCxn0dVsSnf{dOyTZRyrkaZVOJ`ID(y(nv8+Oc48v7fV#?&SJhf zAeb_PE-I!aZ_Z``ijppSXGai5he_NQsI#@7T21_fYKM^CSc zzrTd;+0r|5+Hjs<%y^)EKDO8&$|rLE%3B<#i!AU^o~3Gri?Vyt-|y$)bHqmKHO5-L zdU7!9E$}^cq(y75;ll&ju5b`)$JyZ4?d`Z%@CE9#>=zyHHjqwE{vszuh^#{ASZI1< z4>jEL5K3J-B3B1sSlxc;71jc;fgOA|7r7elJ8@ur9{D^O#oqCa!Qa6NwQf7{<_Zto zgH6!o;U%v|p+@{ZxeQa57tkp4zC3PWdlu%?ochz4&aI4LtHQ73vTzM=s5`2tJd$zw zmMZ1!Zjotm<1+Y!U7-tY%fR(TN6n3(9uT&%!pS~+Ke)1fap58=@CojN;S2ytX>8CJk#$iCGD%o03m%iuylsP#{UNvAf zcKrB}0zNu&_@mi)YhNuLQmui|&RO)@DwbN@tR}tc<(zIamGj?kraQZu^S;*-Wf*Kg zm3g+ZHzpL~;?%IYb^-RixQOQdYX*yI(y8dZqPf$K4K6Nw4nQ z;O{V4S=9}nj?RFAI}EY)1>|!|BYyQS4Q}hI@J2y*eBvh|5K!=a?Xo4{=IGazN zyFGYZcocj&l~3WWUZjcsN?J3l*;H_0??rCzr^j8%{o@EK(c7a$WNIsV%Tg%Eb(p;P zX)Ao2W5HkMwFbTDK-?787cD>irtfnBP7nVMHxB7wo~ne5eoVzm&uH*mwv#`M+eQlx z7l>!Apm;wUz7rh%iQ{@xx8+hgK_40}~fmHjYB_^7`p*E=rq6*px%btcP3l^pG_&@*ApigR? zF*Kc=8n@+U`?WYYbrC7=y}>J8}CbO)Sk))n@gYYutKa-n~h*Kjfi`0S?s zFzkalAG?LHu-!A-GI$)0*m@ayH(LiWPQZ5tnu`pmczh5rn7vD0Kw@