from langchain_ollama import OllamaLLM
from langchain_core.prompts import PromptTemplate
import json
import os
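
# Prerequisites (assumed setup): a local Ollama server with the model already
# pulled (e.g. `ollama pull gemma3:12b`) and the Python packages installed via
# `pip install langchain-ollama langchain-core`.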

# 1. Connect to the locally served Ollama model
llm = OllamaLLM(model="gemma3:12b")

# 2. Create a translation prompt (a plain PromptTemplate pairs with the
#    completion-style OllamaLLM and sends the template text to the model as-is)
prompt = PromptTemplate.from_template(
    "Translate the following English text to Hindi and return only the translation:\n\n{text}"
)

# 3. Build the translation chain once and reuse it for every call
chain = prompt | llm

def translate_to_hindi(text: str) -> str:
    """Return the Hindi translation of a single English string."""
    hindi_text = chain.invoke({"text": text})
    return hindi_text.strip()

# 4. Append results to a JSON file
def save_translation(english_text: str, hindi_translation: str, filename: str = "translations.json"):
    # Load existing entries if the file exists and contains valid JSON;
    # otherwise start with an empty list
    if os.path.exists(filename):
        with open(filename, "r", encoding="utf-8") as f:
            try:
                data = json.load(f)
            except json.JSONDecodeError:
                data = []
    else:
        data = []

    # Append the new entry
    data.append({
        "english_text": english_text,
        "hindi_translation": hindi_translation
    })

    # Write the updated list back, keeping Devanagari characters readable
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

    print(f"✅ Saved to {filename}")

# ---------------------------
# Example usage
# ---------------------------
english_inputs = [
    "Good morning, have a nice day!",
    "The weather is very pleasant today.",
    "We are learning AI and machine learning.",
    "Artificial Intelligence is rapidly transforming industries across the globe, enabling automation of complex tasks, improving decision-making with data-driven insights, and creating opportunities for innovation in areas such as healthcare, education, and sustainable development."
]

for sentence in english_inputs:
    hindi = translate_to_hindi(sentence)
    save_translation(sentence, hindi)
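
# The resulting translations.json is a list of objects like the following
# (illustrative structure only; the actual Hindi text depends on the model):
# [
#     {
#         "english_text": "Good morning, have a nice day!",
#         "hindi_translation": "..."
#     },
#     ...
# ]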
