import gradio as gr
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.output_parsers import JsonOutputParser
from langdetect import detect
import time
# Initialize the LLM and other components
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    task="text-generation",
    max_new_tokens=4096,
    temperature=0.5,
    do_sample=False,
)
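# Classification prompt: asks the model for a single, general topic.
# {LANG} and {TEXT} are filled in at runtime by classify_text below.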
template_classify = '''
You are a topic detector bot. Your task is to determine the main topic of the given text.
The text is written in {LANG}.
Answer with a general main topic, not specific words from the text.
Your answer must not contain specific information from the given text.
Answer with exactly one general main topic, not more.
Answer briefly with a two or three word phrase; do not answer with a long sentence.
If you do not know the topic, just answer General.
What is the main topic of the given text?:
<text>
{TEXT}
</text>
'''
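# Post-processing prompt: asks the model to re-emit its answer as a JSON
# object with an 'Answer' key, so it can be parsed reliably.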
template_json = '''
Your task is to read the following text, convert it to JSON format using 'Answer' as the key, and return it.
<text>
{RESPONSE}
</text>
Your final response MUST contain only the JSON, no other text.
Example:
{{"Answer":["General"]}}
'''
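# Parses the model's raw text output into a Python dict.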
json_output_parser = JsonOutputParser()
# Define the classify_text function
def classify_text(text):
    global llm
    start = time.time()

    # Detect the input language and map the ISO code to a language name;
    # fall back to English for languages outside the map.
    lang = detect(text)
    language_map = {
        "tr": "turkish",
        "en": "english",
        "ar": "arabic",
        "es": "spanish",
        "it": "italian",
    }
    lang = language_map.get(lang, "english")

    # First call: ask the model for the topic of the text.
    prompt_classify = PromptTemplate(
        template=template_classify,
        input_variables=["LANG", "TEXT"]
    )
    formatted_prompt = prompt_classify.format(TEXT=text, LANG=lang)
    classify = llm.invoke(formatted_prompt)

    # Second call: ask the model to wrap its answer in JSON, then parse it.
    prompt_json = PromptTemplate(
        template=template_json,
        input_variables=["RESPONSE"]
    )
    formatted_prompt = prompt_json.format(RESPONSE=classify)
    response = llm.invoke(formatted_prompt)
    parsed_output = json_output_parser.parse(response)

    end = time.time()
    duration = end - start
    return parsed_output, duration
# Create the Gradio interface
def gradio_app(text):
    classification, time_taken = classify_text(text)
    return classification, f"Time taken: {time_taken:.2f} seconds"

def create_gradio_interface():
    with gr.Blocks() as iface:
        text_input = gr.Textbox(label="Text")
        output_text = gr.Textbox(label="Topics")
        time_taken = gr.Textbox(label="Time Taken (seconds)")
        submit_btn = gr.Button("Classify")
        # Route the click through gradio_app so the elapsed time is formatted.
        submit_btn.click(fn=gradio_app, inputs=text_input, outputs=[output_text, time_taken])
    iface.launch()

if __name__ == "__main__":
    create_gradio_interface()