finalize_transformer_sentence_data.py
import glob
from os import path, makedirs

import pandas as pd

INPUT_DIR = "output/attention_data/sentences/"
OUTPUT_DIR = "output/normalized_attention_data/sentences/"
# Index of the first path component below INPUT_DIR, used to mirror the
# input sub-directory structure under OUTPUT_DIR.
FILE_INDEX = len(INPUT_DIR.split("/")) - 1


def normalize_data(filepath):
    """Normalize the numeric attention columns of one CSV per sentence and
    write the original columns plus *_norm columns under OUTPUT_DIR."""
    split_path = filepath.split("/")
    output_dir = f'{OUTPUT_DIR}{"/".join(split_path[FILE_INDEX:-1])}/'
    output_file = output_dir + split_path[-1]
    print(output_file)

    if path.isfile(output_file):
        print(f"{output_file} already exists - skipping creation")
    else:
        transformer_df = pd.read_csv(filepath)
        normalised_dfs = []
        # Normalize each sentence independently: every numeric column is
        # divided by its sum within that sentence, so it sums to 1 per sentence.
        for sentence in transformer_df["SENTENCE_ID"].unique():
            print(sentence)
            mask = transformer_df["SENTENCE_ID"] == sentence
            current_df = transformer_df[mask].select_dtypes(exclude="object")
            normalised_dfs.append(current_df / current_df.sum())
        normal_trans_df = pd.concat(normalised_dfs)

        # Only write output if every input row received a normalized counterpart.
        if normal_trans_df.shape[0] == transformer_df.shape[0]:
            full_normal_trans = pd.concat([transformer_df, normal_trans_df], ignore_index=True, axis=1)
            full_normal_trans.columns = list(transformer_df.columns) + list(
                map(lambda s: f"{s}_norm", list(normal_trans_df.columns)))
            if not path.isdir(output_dir):
                makedirs(output_dir)
            full_normal_trans.to_csv(output_file)


def main():
    # Collect every CSV below INPUT_DIR, normalizing Windows path separators.
    filepaths = [file.replace("\\", "/") for file in glob.glob(f'{INPUT_DIR}/**/*.csv', recursive=True)]
    for filepath in filepaths:
        normalize_data(filepath)
    print("Done!")


if __name__ == "__main__":
    main()
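

# --- A minimal sketch (toy data, not from this repository) of the per-sentence
# normalization performed above. The DataFrame and its TOKEN / ATTN_HEAD_0
# columns are illustrative assumptions about the input, not the real data.

# import pandas as pd
#
# toy_df = pd.DataFrame({
#     "SENTENCE_ID": ["s1", "s1", "s2", "s2"],   # object dtype, excluded from normalization
#     "TOKEN": ["the", "cat", "a", "dog"],       # object dtype, excluded from normalization
#     "ATTN_HEAD_0": [2.0, 2.0, 1.0, 3.0],       # hypothetical numeric attention column
# })
#
# normalised = []
# for sentence in toy_df["SENTENCE_ID"].unique():
#     block = toy_df[toy_df["SENTENCE_ID"] == sentence].select_dtypes(exclude="object")
#     normalised.append(block / block.sum())     # each numeric column sums to 1 within the sentence
#
# print(pd.concat(normalised))
# # ATTN_HEAD_0 becomes [0.5, 0.5] for sentence s1 and [0.25, 0.75] for sentence s2.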