import gradio as gr
import requests
import os

# Hugging Face Inference API token (read from the environment / Space secrets)
HF_TOKEN = os.getenv("HF_TOKEN")
def call_llama(prompt):
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }
    payload = {
        "inputs": prompt,
        # return_full_text=False keeps the prompt out of the generated answer
        "parameters": {"max_new_tokens": 500, "return_full_text": False},
    }
    response = requests.post(
        "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1",
        headers=headers,
        json=payload
    )
    output = response.json()
    # The API returns a list of generations on success and a dict (e.g. {"error": ...}) on failure
    if isinstance(output, list):
        return output[0]["generated_text"]
    return output.get("error", str(output))
def organize_tasks(task_list):
    prompt = f"""
You are a helpful assistant that creates a time-blocked schedule from 9AM to 5PM.
Here are today's tasks with durations:
{task_list}
Return a schedule in table format (Time | Task).
"""
    return call_llama(prompt)
gr.Interface(
    fn=organize_tasks,
    inputs=gr.Textbox(
        label="Enter tasks & durations (one per line)",
        lines=8,
        placeholder="e.g.\nWrite report - 90 min\nEmails - 30 min",
    ),
    outputs=gr.Textbox(label="Generated Schedule"),
    title="🧠 Task Scheduler with Mistral-7B-Instruct",
    description="Powered by Mistral-7B-Instruct via the Hugging Face Inference API",
).launch()
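
To verify the token and endpoint independently of Gradio, a standalone check can be run locally. This is a minimal sketch, not part of the Space itself: it reuses the same endpoint and payload shape as call_llama above, and assumes HF_TOKEN is exported in your shell.

import os
import requests

# Standalone check of the same Inference API endpoint used by the Space
token = os.getenv("HF_TOKEN")
resp = requests.post(
    "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1",
    headers={"Authorization": f"Bearer {token}"},
    json={"inputs": "Write report - 90 min", "parameters": {"max_new_tokens": 50}},
)
print(resp.status_code)
print(resp.json())  # a list of generations on success, a dict with an "error" key otherwise

If this prints a 401/403, the token is missing or invalid; if it prints an "error" dict mentioning loading, the model is still warming up and the request should be retried.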