# Text-generation endpoint for the Mistral model
import requests, json
import os

hf_api_key = os.environ.get("HF_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
headers = {
    "Authorization": f"Bearer {hf_api_key}",
    "Content-Type": "application/json",
}


def query(inputs, parameters=None, ENDPOINT_URL=API_URL):
    # POST the prompt (and optional generation parameters) to the Inference API
    # and return the decoded JSON response.
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST", ENDPOINT_URL, headers=headers, data=json.dumps(data))
    return json.loads(response.content.decode("utf-8"))


# Defining the text-generation function
def generate_text(inputs):
    output = query(inputs)
    return output[0]["generated_text"]


# Gradio interface
import gradio as gr

gr.close_all()
demo = gr.Interface(
    fn=generate_text,
    inputs=[gr.Textbox(label="Prompt", lines=5, placeholder="Enter your text here...")],
    outputs=[gr.Textbox(label="Generated Text", lines=5)],
    title="Text Generator",
    description="Text generation using the `mistralai/Mistral-7B-Instruct-v0.1` model.",
    examples=["I like ice cream", "That cat is cute"],
)
demo.launch()
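
# A minimal sketch of calling query() directly with its optional `parameters`
# argument, outside the Gradio app (assumption: the Inference API's
# text-generation task accepts a "parameters" dict with fields such as
# max_new_tokens and temperature):
#
#   output = query(
#       "I like ice cream",
#       parameters={"max_new_tokens": 64, "temperature": 0.7},
#   )
#   print(output[0]["generated_text"])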