summaryrefslogtreecommitdiff
path: root/make_dataset.py
blob: a3ce0abd3a290e800faa1f98689e2e209063f3ac (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
#!/usr/bin/env python

import json
from glob import glob
from pathlib import Path

from datasets import Dataset, Image


def make_dataset(base="./baseline"):  # TODO: Make actual hf dataset
    """Build a Hugging Face captioning dataset from per-image JSON files.

    Each ``*.jsonl`` file under *base* is expected to hold a SINGLE JSON
    object with ``"English"`` and ``"Translation"`` keys (NOTE(review): the
    files are plain JSON despite the ``.jsonl`` extension — confirm the
    naming is intentional).  The matching image is assumed to live at
    ``../d/Images/<stem>.jpg`` — TODO confirm that path layout.

    Args:
        base: Directory containing the per-image ``*.jsonl`` caption files.

    Returns:
        A ``datasets.Dataset`` with columns ``image`` (cast to the HF
        ``Image`` feature), ``user`` (a fixed captioning prompt) and
        ``assistant`` (the JSON reply the model should learn to emit).
    """
    prompt = "You are a professional English-German translator and also a renowned photography critic.\n\nWrite a detailed caption for this image in a single sentence. Translate the caption into German. The output needs to be JSON, the keys being 'English' and 'German' for the respective captions. Only output the JSON, nothing else." + "<start_of_image>"
    user_prompts = []
    images = []
    assistant_replies = []
    for filename in glob(f"{base}/*.jsonl"):
        # json.load parses the whole file as one object (see docstring note).
        with open(filename, "r", encoding="utf-8") as f:
            data = json.load(f)
        # Path(...).stem drops the ".jsonl" suffix.  This replaces the
        # original `os.path.basename(...)` (`os` was never imported ->
        # NameError) and its double-quotes-inside-a-double-quoted-f-string,
        # which is a SyntaxError before Python 3.12.
        image_path = f"../d/Images/{Path(filename).stem}.jpg"
        user_prompts.append(prompt)
        # Re-serialize only the two wanted keys as the training target.
        assistant_replies.append(json.dumps({
            "English": data["English"],
            "German": data["Translation"],
        }, ensure_ascii=False, indent=0))
        images.append(image_path)

    return Dataset.from_dict(
        {"image": images, "user": user_prompts, "assistant": assistant_replies}
    ).cast_column("image", Image())

# Module-level side effect: building the dataset reads every ./baseline/*.jsonl
# file as soon as this module is imported.  NOTE(review): consider guarding
# with `if __name__ == "__main__":` if import-time I/O is not intended.
dataset = make_dataset()