#!/usr/bin/env python
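"""Build a caption-translation dataset and push it to the Hugging Face Hub.

Reads one JSON file per image from ./baseline, pairs each English caption and
its German translation with the matching image from ../d/Images, and uploads
train/dev/test splits of the result.
"""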
import json
import os
from glob import glob

import huggingface_hub
from datasets import Dataset, DatasetDict
from PIL import Image


def make_dataset(base="./baseline"):
    prompt = "You are a professional English-German translator and also a renowned photography critic.\n\nWrite a detailed caption for this image in a single sentence. Translate the caption into German. The output needs to be JSON, the keys being 'English' and 'German' for the respective captions. Only output the JSON, nothing else." + "<start_of_image>"
    user_prompts = []
    images = []
    ids = []
    assistant_replies = []
    for filename in glob(f"{base}/*.jsonl"):
        # Each baseline file is assumed to hold a single JSON object
        # with at least "English" and "Translation" keys.
        with open(filename, "r") as f:
            data = json.loads(f.read())
        id_ = os.path.basename(filename).removesuffix(".jsonl")
        # The ".jsonl" suffix has already been stripped from id_,
        # so it can be used directly as the image stem.
        image_path = f"../d/Images/{id_}.jpg"
        user_prompts.append(prompt)
        assistant_replies.append(json.dumps({
            "English": data["English"],
            "German": data["Translation"],
        }, ensure_ascii=False, indent=0))
        ids.append(id_)
        images.append(Image.open(image_path).convert("RGB"))
    return Dataset.from_dict({"id": ids, "image": images, "user": user_prompts, "assistant": assistant_replies})


def main():
    huggingface_hub.login()
    dataset = make_dataset()
    # Hold out 10% of the data as the test split.
    splits = dataset.train_test_split(
        test_size=0.1,
        seed=42,
    )
    # Split off 1/9 of the remaining 90% (~10% of the whole) as dev,
    # giving an overall ~80/10/10 train/dev/test split.
    train_valid = splits["train"].train_test_split(
        test_size=0.111111,
        seed=42,
    )
    dataset = DatasetDict({
        "train": train_valid["train"],
        "dev": train_valid["test"],
        "test": splits["test"],
    })
    dataset.push_to_hub("asdf2k/caption_translation", private=True)


if __name__ == "__main__":
    main()
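
# A minimal sketch of loading the pushed dataset from a downstream script
# (hypothetical consumer code; assumes you are authenticated for the
# private repo, e.g. via `huggingface-cli login`):
#
#     from datasets import load_dataset
#     ds = load_dataset("asdf2k/caption_translation")
#     print(ds["train"][0]["user"])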