From d047a5c8f0047ec7953e8850597e62dfdfdd93d5 Mon Sep 17 00:00:00 2001
From: pks
Date: Sun, 30 Nov 2025 21:21:10 +0100
Subject: make_dataset script

---
 make_dataset.py | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100755 make_dataset.py

diff --git a/make_dataset.py b/make_dataset.py
new file mode 100755
index 0000000..a3ce0ab
--- /dev/null
+++ b/make_dataset.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+import json
+import os
+
+from datasets import Dataset, Image
+from glob import glob
+
+
+def make_dataset(base="./baseline"):  # TODO: Make actual hf dataset
+    # Instruction prompt: caption the image in English, translate it into German,
+    # and return both captions as JSON with the keys 'English' and 'German'.
+    prompt = "You are a professional English-German translator and also a renowned photography critic.\n\nWrite a detailed caption for this image in a single sentence. Translate the caption into German. The output needs to be JSON, the keys being 'English' and 'German' for the respective captions. Only output the JSON, nothing else."
+    user_prompts = []
+    images = []
+    assistant_replies = []
+    for filename in glob(f"{base}/*.jsonl"):
+        # Each baseline file holds a single JSON object with 'English' and 'Translation' keys.
+        with open(filename, "r") as f:
+            data = json.loads(f.read())
+        # The matching image shares the JSONL file's basename.
+        image_path = f"../d/Images/{os.path.basename(filename).removesuffix('.jsonl')}.jpg"
+        user_prompts.append(prompt)
+        assistant_replies.append(json.dumps({
+            "English": data["English"],
+            "German": data["Translation"],
+        }, ensure_ascii=False, indent=0))
+        images.append(image_path)
+
+    return Dataset.from_dict({"image": images, "user": user_prompts, "assistant": assistant_replies}).cast_column("image", Image())
+
+dataset = make_dataset()
--
cgit v1.2.3
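
Not part of the patch: a minimal usage sketch for checking and persisting the resulting dataset, assuming the ./baseline JSONL files and the ../d/Images/ layout referenced above exist locally. The output directory name is arbitrary, and save_to_disk/push_to_hub are standard datasets methods shown only as options.

    from make_dataset import make_dataset  # note: importing also runs the module-level make_dataset() call

    ds = make_dataset("./baseline")
    print(ds)                      # Dataset with columns: image, user, assistant
    print(ds["assistant"][0])      # JSON string with 'English' and 'German' keys
    ds.save_to_disk("caption_translation_dataset")  # or: ds.push_to_hub("<repo-id>")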