import os
import time
from typing import Any

import requests
import streamlit as st
from dotenv import find_dotenv, load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from transformers import pipeline

from utils.custom import css_code
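
# Streamlit app that turns an uploaded image into a short spoken story:
#   1. BLIP captions the uploaded image,
#   2. GPT (via LangChain) expands the caption into a short story,
#   3. a hosted text-to-speech model reads the story aloud.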

load_dotenv(find_dotenv())
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")


def progress_bar(amount_of_time: int) -> None:
    """
    A very simple progress bar that increases over time,
    then disappears once it reaches completion.
    :param amount_of_time: number of steps the bar advances through
    :return: None
    """
    progress_text = "Please wait, the model is currently running..."
    my_bar = st.progress(0, text=progress_text)

    for percent_complete in range(amount_of_time):
        time.sleep(0.04)
        my_bar.progress(percent_complete + 1, text=progress_text)
    time.sleep(1)
    my_bar.empty()


def generate_text_from_image(url: str) -> str:
    """
    A function that uses the BLIP model to generate text from an image.
    :param url: image location
    :return: text: generated text from the image
    """
    image_to_text: Any = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

    generated_text: str = image_to_text(url)[0]["generated_text"]

    print(f"IMAGE INPUT: {url}")
    print(f"GENERATED TEXT OUTPUT: {generated_text}")
    return generated_text
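
# Note: pipeline(...) above reloads the BLIP weights on every uploaded image. A
# minimal sketch of a cached loader (assuming the installed Streamlit version
# provides st.cache_resource; get_image_captioner is a hypothetical helper):
#
# @st.cache_resource
# def get_image_captioner() -> Any:
#     return pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#
# generated_text = get_image_captioner()(url)[0]["generated_text"]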


def generate_story_from_text(scenario: str) -> str:
    """
    A function using a prompt template and GPT to generate a short story. LangChain is also
    used for chaining purposes
    :param scenario: generated text from the image
    :return: generated story from the text
    """
    prompt_template: str = f"""
    Basit bir anlatımdan hikaye yaratabilen yetenekli bir hikaye anlatıcısısınız./
    Aşağıdaki senaryoyu kullanarak bir hikaye oluşturun; Hikaye en fazla  100 kelime uzunluğunda olmalı;
    
    CONTEXT: {scenario}
    STORY:
    """

    prompt: PromptTemplate = PromptTemplate(template=prompt_template, input_variables=["scenario"])

    llm: Any = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

    story_llm: Any = LLMChain(llm=llm, prompt=prompt, verbose=True)

    generated_story: str = story_llm.predict(scenario=scenario)

    print(f"TEXT INPUT: {scenario}")
    print(f"GENERATED STORY OUTPUT: {generated_story}")
    return generated_story
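
# Note: LLMChain and the langchain.chat_models import path are deprecated in newer
# LangChain releases. A rough equivalent using the runnable syntax (a sketch,
# assuming the langchain-openai package is installed) would be:
#
# from langchain_openai import ChatOpenAI
# chain = prompt | ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9)
# generated_story = chain.invoke({"scenario": scenario}).content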


def generate_speech_from_text(message: str) -> None:
    """
    A function using the ESPnet text-to-speech model hosted on the HuggingFace
    Inference API.
    :param message: short story generated by the GPT model
    :return: None (the generated audio is written to generated_audio.flac)
    """
    API_URL: str = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers: dict[str, str] = {"Authorization": f"Bearer {HUGGINGFACE_API_TOKEN}"}
    # The Inference API expects the text under the "inputs" key.
    payloads: dict[str, str] = {
        "inputs": message
    }

    response: Any = requests.post(API_URL, headers=headers, json=payloads)
    with open("generated_audio.flac", "wb") as file:
        file.write(response.content)
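
# Note: the hosted endpoint can return an error payload (for example while the
# model is still loading) instead of audio bytes. A more defensive call, assuming
# the Inference API's "wait_for_model" option, might look like:
#
# response = requests.post(
#     API_URL,
#     headers=headers,
#     json={"inputs": message, "options": {"wait_for_model": True}},
# )
# response.raise_for_status()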


def main() -> None:
    """
    Main function
    :return: None
    """
    st.set_page_config(page_title="Image-to-Story Converter", page_icon="🤖")

    st.markdown(css_code, unsafe_allow_html=True)

    with st.sidebar:
        st.image("img/FED.jpg")
        st.write("---")
        st.write("FEDAİ STORY BOT")

    st.header("Image-to-Story Converter")
    uploaded_file: Any = st.file_uploader("Please upload an image...", type="jpg")

    if uploaded_file is not None:
        print(uploaded_file)
        bytes_data: Any = uploaded_file.getvalue()
        with open(uploaded_file.name, "wb") as file:
            file.write(bytes_data)
        st.image(uploaded_file, caption="Image uploaded, please wait...",
                 use_column_width=True)
        progress_bar(100)
        scenario: str = generate_text_from_image(uploaded_file.name)
        story: str = generate_story_from_text(scenario)
        generate_speech_from_text(story)

        with st.expander("Hikayenin Konusu"):
            st.write(scenario)
        with st.expander("Oluşturulan Hikaye"):
            st.write(story)

        st.audio("generated_audio.flac")


if __name__ == "__main__":
    main()
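
# To run locally (assuming this file is saved as app.py and HUGGINGFACE_API_TOKEN
# and OPENAI_API_KEY are set in a .env file):
#   streamlit run app.py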