preprocess.py
#!/usr/bin/env python3
import os
import argparse
import logging
import json
import random

from data import get_dataset_class_by_name

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(message)s",
    level=logging.INFO,
    datefmt="%H:%M:%S",
)
logger = logging.getLogger(__name__)

class Preprocessor:
    """
    Load the raw dataset using a loader specified in `data.py`,
    process it, and save it in the output directory.

    By default, a directory with the processed dataset contains the files
    `train.json`, `dev.json`, and `test.json`, each with the following structure:

        {
            "data": [
                {... data entry #1 ...},
                {... data entry #2 ...},
                ...
                {... data entry #N ...},
            ]
        }

    This format is expected when loading the data into PyTorch dataloaders
    for training and inference.
    """

    def __init__(self, dataset, out_dirname, mode):
        self.dataset = dataset
        self.out_dirname = out_dirname
        self.mode = mode

    def create_examples(self, entry):
        """
        Generate training examples from an entry in the dataset.
        """
        examples = []

        if self.mode == "causal_lm":
            # input == output for causal LM
            example = {"in": entry}
        elif self.mode == "seq2seq":
            # a simple case without any extra processing
            example = {"in": entry[0], "out": entry[1]}
        else:
            raise ValueError(f"Unknown mode: {self.mode}")

        examples.append(example)
        return examples
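
    # A sketch of the expected behavior (assuming the loader in `data.py` yields
    # (input, output) pairs for seq2seq and plain strings for causal LM):
    #   self.create_examples(("Hello", "Bonjour")) -> [{"in": "Hello", "out": "Bonjour"}]
    #   self.create_examples("Hello world")        -> [{"in": "Hello world"}]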

    def process(self, split):
        output = {"data": []}
        data = self.dataset.data[split]

        for entry in data:
            examples = self.create_examples(entry)
            for example in examples:
                output["data"].append(example)

        with open(os.path.join(self.out_dirname, f"{split}.json"), "w") as f:
            json.dump(output, f, indent=4, ensure_ascii=False)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, required=True, help="Name of the dataset to preprocess.")
    parser.add_argument("--dataset_dir", type=str, default=None, help="Path to the dataset.")
    parser.add_argument(
        "--mode",
        choices=["causal_lm", "seq2seq"],
        required=True,
        help="Preprocessing mode, depends on the dataset.",
    )
    parser.add_argument("--output", type=str, required=True, help="Name of the output directory.")
    parser.add_argument(
        "--splits",
        type=str,
        nargs="+",
        default=["train", "dev", "test"],
        help="Dataset splits (e.g. train dev test).",
    )
    parser.add_argument("--seed", type=int, default=42, help="Random seed.")
    args = parser.parse_args()

    random.seed(args.seed)
    logger.info(args)

    dataset = get_dataset_class_by_name(args.dataset)()

    try:
        dataset.load(splits=args.splits, path=args.dataset_dir)
    except FileNotFoundError:
        logger.error("Dataset could not be loaded")
        raise

    out_dirname = args.output
    try:
        os.makedirs(out_dirname, exist_ok=True)
    except OSError:
        logger.error(f"Output directory {out_dirname} cannot be created")
        raise

    preprocessor = Preprocessor(dataset=dataset, out_dirname=out_dirname, mode=args.mode)

    for split in args.splits:
        preprocessor.process(split)

    logger.info("Preprocessing finished.")