Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ hf_token = os.getenv('HF_API_TOKEN')
 if not hf_token:
     raise ValueError("Hugging Face API token is not set. Please set the HF_API_TOKEN environment variable.")
 
-
+
 
 
 # Define your prompt template
@@ -46,12 +46,12 @@ def generate_synthetic_data(description, columns):
 import requests
 
 # Define your Streamlit Space inference URL
-inference_endpoint = "https://yakine
+inference_endpoint = "https://huggingface.co/spaces/yakine/model"  # Update this to your Streamlit Space URL
 
 def generate_synthetic_data(description, columns):
     try:
-        # Format the prompt
-        formatted_prompt =
+        # Format the prompt for your Llama 3 model
+        formatted_prompt = f"{description}, with columns: {', '.join(columns)}"  # Adjust this based on your Streamlit app's prompt format
 
         # Send a POST request to the Streamlit Space API
         headers = {
@@ -59,7 +59,7 @@ def generate_synthetic_data(description, columns):
             "Content-Type": "application/json"
         }
         data = {
-            "inputs": formatted_prompt,
+            "inputs": formatted_prompt,  # Adjust according to the input expected by your Streamlit app
             "parameters": {
                 "max_new_tokens": 512,
                 "top_p": 0.90,
@@ -67,13 +67,13 @@ def generate_synthetic_data(description, columns):
             }
         }
 
-        response = requests.post(inference_endpoint, json=data, headers=headers)
+        response = requests.post(inference_endpoint + "/predict", json=data, headers=headers)
 
         if response.status_code != 200:
             return f"Error: {response.status_code}, {response.text}"
 
         # Extract the generated text from the response
-        generated_text = response.json()
+        generated_text = response.json().get('data')  # Adjust based on your Streamlit Space response structure
        return generated_text
 
    except Exception as e:
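For reference, a minimal sketch of how the updated function might be exercised. It assumes app.py (as patched above) is importable, HF_API_TOKEN is already set in the environment, and the Space really serves JSON at the assumed /predict route; the description and column names are illustrative only, not taken from this commit.

# Usage sketch (assumptions: HF_API_TOKEN exported before app.py is imported,
# since the module raises ValueError at import time if it is missing).
from app import generate_synthetic_data  # hypothetical import of the patched module

# Illustrative arguments; any description/column list matching the prompt format works.
result = generate_synthetic_data(
    description="A table of synthetic e-commerce orders",
    columns=["order_id", "customer_name", "total_price"],
)

# On success this is whatever the Space returns under its 'data' key;
# on a non-200 response it is an "Error: <status>, <body>" string.
print(result)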