Commit 206c9a3 · committed by Ubuntu · 0 parent(s)

Initial SHX commit 🚀 Ready to launch!

Files changed:
- .gitignore +1 -0
- README.md +3 -0
- SHX-setup.sh +279 -0
- app.py +53 -0
- requirements.txt +5 -0
- shx-ai.py +25 -0
- shx-config.json +7 -0
- shx-error.log +2 -0
- shx-setup.log +82 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+shx-venv/
README.md
ADDED
@@ -0,0 +1,3 @@
+# SHX-Auto: Multiversal System Builder
+## 🤯 GPT-Neo-based automation terminal agent for quantum-native devs.
+✨ By: subatomicERROR
SHX-setup.sh
ADDED
@@ -0,0 +1,279 @@
+#!/bin/bash
+set -euo pipefail
+IFS=$'\n\t'
+
+# === ENV VARIABLES ===
+export HF_HOME="$HOME/.cache/huggingface"
+export MODEL_NAME="EleutherAI/gpt-neo-1.3B"
+export WORK_DIR="$HOME/dev/shx-hfspace"
+export VENV_DIR="$WORK_DIR/shx-venv"
+export LOG_FILE="$WORK_DIR/shx-setup.log"
+export CONFIG_FILE="$WORK_DIR/shx-config.json"
+export HF_SPACE_NAME="SHX-Auto"
+export HF_USERNAME="subatomicERROR"
+
+# === COLORS ===
+RED="\e[91m"
+GREEN="\e[92m"
+YELLOW="\e[93m"
+CYAN="\e[96m"
+RESET="\e[0m"
+
+# === SELF-HEAL ===
+trap 'echo -e "\n${RED}❌ Error occurred at line $LINENO: $BASH_COMMAND${RESET}" >> "$LOG_FILE"; echo -e "${YELLOW}🔧 Triggering SHX Self-Healing...${RESET}"; shx_self_heal $LINENO "$BASH_COMMAND"' ERR
+
+shx_self_heal() {
+  local line=$1
+  local cmd="$2"
+  echo -e "${CYAN}🔁 Self-Healing (Line $line | Command: $cmd)${RESET}"
+
+  if [[ "$cmd" == *"pip install"* ]]; then
+    echo -e "${YELLOW}🔁 Retrying pip install with --no-cache-dir...${RESET}"
+    pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub || true
+  fi
+
+  if [[ "$cmd" == *"huggingface-cli login"* ]]; then
+    echo -e "${YELLOW}🔁 Retrying interactive Hugging Face login...${RESET}"
+    huggingface-cli login || true
+  fi
+
+  if [[ "$cmd" == *"git push"* ]]; then
+    echo -e "${YELLOW}🔁 Retrying git push...${RESET}"
+    git push -u origin main || true
+  fi
+
+  echo -e "${GREEN}✅ Self-Heal Complete. Please rerun if needed.${RESET}"
+  exit 1
+}
+
+# === START ===
+echo -e "${CYAN}\n🚀 [SHX] Launching Hyper-Intelligent Setup...\n${RESET}"
+
+# === CLEAN + VENV ===
+echo -e "${CYAN}🧹 Preparing Virtual Environment...${RESET}"
+rm -rf "$VENV_DIR"
+python3 -m venv "$VENV_DIR"
+source "$VENV_DIR/bin/activate"
+echo -e "${GREEN}✅ Venv activated at $VENV_DIR${RESET}"
+
+# === DEPENDENCIES ===
+echo -e "${CYAN}\n📦 Installing Python packages...${RESET}"
+pip install --upgrade pip
+pip install --no-cache-dir transformers torch gradio git-lfs huggingface_hub
+
+# === CHECK TORCH ===
+echo -e "${CYAN}🧠 Verifying PyTorch...\n${RESET}"
+PYTORCH_VERSION=$(python3 -c "import torch; print(torch.__version__)")
+echo -e "${GREEN}✅ PyTorch: $PYTORCH_VERSION${RESET}"
+
+# === AUTHENTICATION ===
+echo -e "\n${CYAN}🔐 Enter your Hugging Face token:${RESET}"
+read -s hf_token
+huggingface-cli login --token "$hf_token"
+export HF_TOKEN="$hf_token"
+
+whoami_output=$(huggingface-cli whoami)
+echo -e "${GREEN}✅ Logged in as: $whoami_output${RESET}"
+
+# === MODEL SELECTION ===
+echo -e "\n${CYAN}🧠 Select a model (default: EleutherAI/gpt-neo-1.3B):${RESET}"
+read -p "Model name: " selected_model
+MODEL_NAME=${selected_model:-EleutherAI/gpt-neo-1.3B}
+export HF_MODEL="$MODEL_NAME"
+
+# === CLEAR BROKEN CACHE ===
+echo -e "${CYAN}\n🔄 Clearing broken cache for $MODEL_NAME...${RESET}"
+rm -rf ~/.cache/huggingface/hub/models--EleutherAI--gpt-neo-1.3B
+
+# === MODEL DOWNLOAD ===
+echo -e "${CYAN}\n📥 Downloading $MODEL_NAME Model (via GPTNeoForCausalLM)...\n${RESET}"
+python3 - <<EOF
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
+tokenizer = GPT2Tokenizer.from_pretrained("$MODEL_NAME")
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready (GPTNeoForCausalLM).")
+EOF
+
+# === GRADIO APP ===
+echo -e "${CYAN}🖥️ Writing Gradio Interface...${RESET}"
+cat <<EOF > "$WORK_DIR/app.py"
+import gradio as gr
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+import torch
+import json
+import os
+
+# Load configuration
+with open("$CONFIG_FILE", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+
+chat_history = []
+
+def shx_terminal(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+    input_ids = inputs.input_ids
+    attention_mask = inputs.attention_mask
+    pad_token_id = tokenizer.eos_token_id
+
+    try:
+        with torch.no_grad():
+            output = model.generate(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                pad_token_id=pad_token_id,
+                max_length=config["max_length"],
+                temperature=config["temperature"],
+                top_k=config["top_k"],
+                top_p=config["top_p"],
+                do_sample=True
+            )
+        response = tokenizer.decode(output[0], skip_special_tokens=True)
+        chat_history.append((prompt, response))
+        return response, chat_history
+    except Exception as e:
+        return f"⚠️ SHX caught an error during generation:\\n{str(e)}", chat_history
+
+with gr.Blocks(css="body { background-color: black; color: #00FF41; font-family: monospace; }") as demo:
+    gr.Markdown("## 🤖 **SHX-Auto: Multiversal System Builder**")
+    with gr.Row():
+        with gr.Column():
+            input_box = gr.Textbox(label="Your Command")
+            output_box = gr.Textbox(label="SHX Response")
+            run_btn = gr.Button("Run")
+        with gr.Column():
+            chat_box = gr.Chatbot(label="Chat History")
+    # Wire the click after both outputs exist; shx_terminal returns
+    # (response, chat_history) so the Chatbot refreshes on every run.
+    run_btn.click(shx_terminal, inputs=input_box, outputs=[output_box, chat_box])
+
+demo.launch()
+EOF
+
+# === REQUIREMENTS & README ===
+echo -e "${CYAN}📦 Writing requirements.txt and README.md...${RESET}"
+cat <<EOF > "$WORK_DIR/requirements.txt"
+transformers
+torch
+gradio
+git-lfs
+huggingface_hub
+EOF
+
+cat <<EOF > "$WORK_DIR/README.md"
+# SHX-Auto: Multiversal System Builder
+## 🤯 GPT-Neo-based automation terminal agent for quantum-native devs.
+✨ By: subatomicERROR
+EOF
+
+# === CONFIGURATION FILE ===
+echo -e "${CYAN}⚙️ Writing configuration file...${RESET}"
+cat <<EOF > "$WORK_DIR/shx-config.json"
+{
+  "model_name": "$MODEL_NAME",
+  "max_length": 150,
+  "temperature": 0.7,
+  "top_k": 50,
+  "top_p": 0.9
+}
+EOF
+
+# === FINAL TEST ===
+echo -e "${CYAN}\n🧪 Running Final Test...${RESET}"
+python3 - <<EOF
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+import json
+
+# Load configuration
+with open("$WORK_DIR/shx-config.json", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+prompt = "SHX is"
+inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+output = model.generate(
+    input_ids=inputs.input_ids,
+    attention_mask=inputs.attention_mask,
+    pad_token_id=tokenizer.eos_token_id,
+    max_length=config["max_length"],
+    temperature=config["temperature"],
+    top_k=config["top_k"],
+    top_p=config["top_p"],
+    do_sample=True
+)
+print("🧠 SHX Test Output:", tokenizer.decode(output[0], skip_special_tokens=True))
+EOF
+
+echo -e "\n${GREEN}✅ SHX is FULLY ONLINE and OPERATIONAL (with $MODEL_NAME)!${RESET}"
+echo -e "${CYAN}🌐 Access: https://huggingface.co/spaces/$HF_USERNAME/$HF_SPACE_NAME${RESET}"
+
+# === AI-DRIVEN AUTOMATION ===
+echo -e "${CYAN}\n🤖 Initializing AI-Driven Automation...${RESET}"
+cat <<EOF > "$WORK_DIR/shx-ai.py"
+import json
+import subprocess
+
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+
+# Load configuration
+with open("$WORK_DIR/shx-config.json", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+
+def run_command(command):
+    # Helper for executing generated shell commands.
+    try:
+        result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+        return result.stdout
+    except subprocess.CalledProcessError as e:
+        return f"⚠️ Error: {e.stderr}"
+
+def shx_ai(prompt):
+    # Generate a response with the model directly; app.py only serves the
+    # Gradio UI and does not accept a --prompt flag.
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+    output = model.generate(
+        input_ids=inputs.input_ids,
+        attention_mask=inputs.attention_mask,
+        pad_token_id=tokenizer.eos_token_id,
+        max_length=config["max_length"],
+        temperature=config["temperature"],
+        top_k=config["top_k"],
+        top_p=config["top_p"],
+        do_sample=True
+    )
+    return tokenizer.decode(output[0], skip_special_tokens=True)
+
+# Example usage
+if __name__ == "__main__":
+    prompt = "Create a simple web application with a form to collect user data."
+    response = shx_ai(prompt)
+    print(f"🤖 SHX Response: {response}")
+EOF
+
+echo -e "${GREEN}✅ AI-Driven Automation Initialized. Ready to build almost anything!${RESET}"
+
+# === FINAL MESSAGE ===
+echo ""
+echo "🎉 ⚙️ Boom, your SHX is ready and fully configured."
+echo ""
+echo "✅ PyTorch: $PYTORCH_VERSION"
+echo "✅ Model: $HF_MODEL"
+echo "✅ Hugging Face Token saved for: $HF_USERNAME"
+echo ""
+echo "🛠️ To push your SHX Space manually to Hugging Face, follow these final steps:"
+echo ""
+echo "1. Initialize git in this folder:"
+echo "   git init"
+echo ""
+echo "2. Commit your SHX files:"
+echo "   git add . && git commit -m \"Initial SHX commit\""
+echo ""
+echo "3. Create the Space manually (choose SDK: gradio/static/etc.):"
+echo "   huggingface-cli repo create SHX-Auto --type space --space-sdk gradio"
+echo ""
+echo "4. Add the remote:"
+echo "   git remote add origin https://huggingface.co/spaces/$HF_USERNAME/SHX-Auto"
+echo ""
+echo "5. Push your Space:"
+echo "   git branch -M main && git push -u origin main"
+echo ""
+echo "🌐 After that, visit: https://huggingface.co/spaces/$HF_USERNAME/SHX-Auto"
+echo ""
+echo "The SHX interface will then be live on Hugging Face. HAPPY CODING!"
+echo ""
+echo "For more information and support, visit our GitHub repository:"
+echo "https://github.com/subatomicERROR"
+echo ""
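
Editor's note: the five numbered steps the script prints at the end can be run as one sequence. This is only a consolidation of the script's own printed instructions, assuming huggingface-cli is installed and already authenticated:

    git init
    git add . && git commit -m "Initial SHX commit"
    huggingface-cli repo create SHX-Auto --type space --space-sdk gradio
    git remote add origin https://huggingface.co/spaces/subatomicERROR/SHX-Auto
    git branch -M main && git push -u origin main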
app.py
ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+import torch
+import json
+import os
+
+# Load configuration
+with open("/home/subatomicERROR/dev/shx-hfspace/shx-config.json", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+
+chat_history = []
+
+def shx_terminal(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+    input_ids = inputs.input_ids
+    attention_mask = inputs.attention_mask
+    pad_token_id = tokenizer.eos_token_id
+
+    try:
+        with torch.no_grad():
+            output = model.generate(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                pad_token_id=pad_token_id,
+                max_length=config["max_length"],
+                temperature=config["temperature"],
+                top_k=config["top_k"],
+                top_p=config["top_p"],
+                do_sample=True
+            )
+        response = tokenizer.decode(output[0], skip_special_tokens=True)
+        chat_history.append((prompt, response))
+        return response, chat_history
+    except Exception as e:
+        return f"⚠️ SHX caught an error during generation:\n{str(e)}", chat_history
+
+with gr.Blocks(css="body { background-color: black; color: #00FF41; font-family: monospace; }") as demo:
+    gr.Markdown("## 🤖 **SHX-Auto: Multiversal System Builder**")
+    with gr.Row():
+        with gr.Column():
+            input_box = gr.Textbox(label="Your Command")
+            output_box = gr.Textbox(label="SHX Response")
+            run_btn = gr.Button("Run")
+        with gr.Column():
+            chat_box = gr.Chatbot(label="Chat History")
+    # Wire the click after both outputs exist; shx_terminal returns
+    # (response, chat_history) so the Chatbot refreshes on every run.
+    run_btn.click(shx_terminal, inputs=input_box, outputs=[output_box, chat_box])
+
+demo.launch()
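
Editor's note: once the Space is running, the same app can be driven programmatically instead of through the browser. A minimal sketch using the gradio_client package; the Space being public and Gradio's default "/predict" endpoint name are assumptions, not part of this commit:

    # pip install gradio_client
    from gradio_client import Client

    client = Client("subatomicERROR/SHX-Auto")  # Space id taken from this commit
    result = client.predict("SHX is", api_name="/predict")  # assumed default endpoint
    print(result)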
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+transformers
+torch
+gradio
+git-lfs
+huggingface_hub
shx-ai.py
ADDED
@@ -0,0 +1,25 @@
+import json
+import subprocess
+
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+
+# Load configuration
+with open("/home/subatomicERROR/dev/shx-hfspace/shx-config.json", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+tokenizer.pad_token = tokenizer.eos_token
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+
+def run_command(command):
+    # Helper for executing generated shell commands.
+    try:
+        result = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+        return result.stdout
+    except subprocess.CalledProcessError as e:
+        return f"⚠️ Error: {e.stderr}"
+
+def shx_ai(prompt):
+    # Generate a response with the model directly; app.py only serves the
+    # Gradio UI and does not accept a --prompt flag.
+    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+    output = model.generate(
+        input_ids=inputs.input_ids,
+        attention_mask=inputs.attention_mask,
+        pad_token_id=tokenizer.eos_token_id,
+        max_length=config["max_length"],
+        temperature=config["temperature"],
+        top_k=config["top_k"],
+        top_p=config["top_p"],
+        do_sample=True
+    )
+    return tokenizer.decode(output[0], skip_special_tokens=True)
+
+# Example usage
+if __name__ == "__main__":
+    prompt = "Create a simple web application with a form to collect user data."
+    response = shx_ai(prompt)
+    print(f"🤖 SHX Response: {response}")
shx-config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "model_name": "EleutherAI/gpt-neo-1.3B",
+  "max_length": 150,
+  "temperature": 0.7,
+  "top_k": 50,
+  "top_p": 0.9
+}
shx-error.log
ADDED
@@ -0,0 +1,2 @@
+
+❌ Error occurred at line 72: git lfs track "*.bin"
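
Editor's note: the logged failure of git lfs track "*.bin" commonly occurs when Git LFS hooks were never initialized before tracking; that cause is an assumption here, but the usual remedy is:

    sudo apt-get install git-lfs   # or your distro's package manager
    git lfs install                # set up LFS hooks for the current repo
    git lfs track "*.bin"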
shx-setup.log
ADDED
@@ -0,0 +1,82 @@
+
+❌ Error occurred at line 76: python3 - <<EOF
+from transformers import AutoTokenizer, AutoModelForCausalLM
+print("📥 Downloading tokenizer & model...")
+tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")
+model = AutoModelForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready.")
+EOF
+
+
+❌ Error occurred at line 76: python3 - <<EOF
+from transformers import AutoTokenizer, GPTNeoForCausalLM
+print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
+tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")
+model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready (GPTNeoForCausalLM).")
+EOF
+
+
+❌ Error occurred at line 76: python3 - <<EOF
+from transformers import AutoTokenizer, GPTNeoForCausalLM
+print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
+tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")
+model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready (GPTNeoForCausalLM).")
+EOF
+
+
+❌ Error occurred at line 74: python3 - <<EOF
+from transformers import AutoTokenizer, GPTNeoForCausalLM
+print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
+tokenizer = AutoTokenizer.from_pretrained("$MODEL_NAME")
+model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready (GPTNeoForCausalLM).")
+EOF
+
+
+❌ Error occurred at line 88: python3 - <<EOF
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+print("📥 Downloading tokenizer & model (GPTNeoForCausalLM)...")
+tokenizer = GPT2Tokenizer.from_pretrained("$MODEL_NAME")
+model = GPTNeoForCausalLM.from_pretrained("$MODEL_NAME")
+print("✅ Model ready (GPTNeoForCausalLM).")
+EOF
+
+
+❌ Error occurred at line 182: huggingface-cli repo create "$HF_USERNAME/$HF_SPACE_NAME" --type space --space-sdks gradio
+
+❌ Error occurred at line 182: huggingface-cli repo create "$HF_USERNAME/$HF_SPACE_NAME" --type space
+
+❌ Error occurred at line 182: huggingface-cli repo create "$HF_USERNAME/$HF_SPACE_NAME" --type space
+
+❌ Error occurred at line 182: huggingface-cli repo create "$HF_USERNAME/$HF_SPACE_NAME" --type space
+
+❌ Error occurred at line 182: huggingface-cli repo create "$HF_SPACE_NAME" --type space
+
+❌ Error occurred at line 216: huggingface-cli repo create "$HF_SPACE_NAME" --type space --space-sdk gradio
+
+❌ Error occurred at line 184: python3 - <<EOF
+from transformers import GPT2Tokenizer, GPTNeoForCausalLM
+import json
+
+# Load configuration
+with open("$WORK_DIR/shx-config.json", "r") as f:
+    config = json.load(f)
+
+tokenizer = GPT2Tokenizer.from_pretrained(config["model_name"])
+model = GPTNeoForCausalLM.from_pretrained(config["model_name"])
+prompt = "SHX is"
+inputs = tokenizer(prompt, return_tensors="pt", padding=True)
+output = model.generate(
+    input_ids=inputs.input_ids,
+    attention_mask=inputs.attention_mask,
+    pad_token_id=tokenizer.eos_token_id,
+    max_length=config["max_length"],
+    temperature=config["temperature"],
+    top_k=config["top_k"],
+    top_p=config["top_p"]
+)
+print("🧠 SHX Test Output:", tokenizer.decode(output[0], skip_special_tokens=True))
+EOF
+