Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
UPDATED JANUARY 21 - 2025
|
3 |
+
To fix the HF API TOKEN STUFF___DONE
|
4 |
+
"""
|
5 |
+
|
6 |
+
import datetime, random, string
|
7 |
+
import gradio as gr
|
8 |
+
#from openai import OpenAI
|
9 |
+
from gradio_client import Client
|
10 |
+
from PIL import Image
|
11 |
+
from rich.console import Console
|
12 |
+
import os
|
13 |
+
from huggingface_hub import InferenceClient
|
14 |
+
|
15 |
+
|
16 |
+
# Shared rich console for terminal logging, wrapped at 80 columns
console = Console(width=80)
# Gradio theme for the whole app: blue primary / pink secondary,
# Oxanium Google font with Arial/sans-serif fallbacks
theme=gr.themes.Default(primary_hue="blue", secondary_hue="pink",
                    font=[gr.themes.GoogleFont("Oxanium"), "Arial", "sans-serif"])
|
19 |
+
|
20 |
+
def checkHFT(hf_token):
    """
    Validate the Hugging Face token typed in the UI.

    hf_token -> str, value of the TOKEN textbox

    Returns four gr.Row updates (made visible only when the token looks
    valid) plus a status string for the alert text box.
    """
    # Fix: guard against a None/empty textbox value -- the original
    # `'hf_' in hf_token` raises TypeError when hf_token is None.
    if hf_token and 'hf_' in hf_token:
        return gr.Row(visible=True),gr.Row(visible=True),gr.Row(visible=True),gr.Row(visible=True),"✅HF TOKEN detected"
    else:
        gr.Warning("⚠️ You don't have a Hugging Face Token set")
        return gr.Row(visible=False),gr.Row(visible=False),gr.Row(visible=False),gr.Row(visible=False), "⚠️ You don't have a Hugging Face Token set"
|
27 |
+
|
28 |
+
|
29 |
+
def writehistory(filename,text):
    """
    Append a string (plus a trailing newline) to a logfile.

    filename -> str, path/name of the log file to append to
    text     -> str, the text to be written in the file
    """
    # Fix: the original opened a hard-coded path, ignoring the `filename`
    # argument, and called f.close() inside the `with` block (redundant:
    # the context manager already closes the file).
    with open(filename, 'a', encoding='utf-8') as f:
        f.write(text)
        f.write('\n')
|
39 |
+
|
40 |
+
def genRANstring(n):
    """
    Build a randomized logfile name.

    n = int, number of random characters to include
    Return -> str, a filename 'Logfile_<chars>.txt' where <chars> is n
    random uppercase letters and/or digits
    """
    pool = string.ascii_uppercase + string.digits
    tail = ''.join(random.choices(pool, k=n))
    return f'Logfile_{tail}.txt'
|
49 |
+
|
50 |
+
# Per-session randomized log filename for writehistory()
# NOTE(review): not referenced anywhere in this file's visible code -- confirm
LOGFILENAME = genRANstring(5)
|
51 |
+
|
52 |
+
################## STABLE DIFFUSION PROMPT ##############################
|
53 |
+
def createSDPrompt(token,headers):
    """
    Generate a Stable Diffusion prompt from article headers via the
    HF Inference API (Qwen/Qwen2.5-72B-Instruct chat completion).

    token   -> str, Hugging Face API token
    headers -> str, the article header text the image prompt is based on
    Return  -> str, the text-to-image prompt produced by the model
    """
    SD_prompt = f'''Create a prompt for Stable Diffusion based on the information below. Return only the prompt.\n---\n{headers}\n\nPROMPT:'''
    api = InferenceClient(token=token)
    reply = api.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=[{"role": "user", "content": SD_prompt}],
        max_tokens=500
    )
    # Echo the model output to the console, then hand it back to the UI
    print(reply.choices[0].message.content)
    return reply.choices[0].message.content
|
66 |
+
|
67 |
+
############### CREATE IMAGE ##########################
|
68 |
+
def CreateImage(token,ImageGEN_prompt):
    """
    Render an image with the stabilityai/stable-diffusion-3.5-large Space
    and save it to the current directory as a timestamped PNG.

    token           -> str, Hugging Face API token
    ImageGEN_prompt -> str, text-to-image prompt
    Return          -> (PIL.Image.Image, str) the opened image and the
                       filename it was saved under
    """
    # Fix: removed the redundant in-function imports -- Client and Image are
    # already imported at module level, and gradio_client.handle_file was
    # imported twice without ever being used.
    client = Client("stabilityai/stable-diffusion-3.5-large",hf_token=token)
    result = client.predict(
        prompt=ImageGEN_prompt,
        negative_prompt='blur',
        seed=0,
        randomize_seed=True,
        width=1360,
        height=768,
        guidance_scale=4.5,
        num_inference_steps=30,
        api_name="/infer"
    )
    ############ SAVE IMAGE ##########################
    # result[0] is the path of the temp file the Space returned
    # NOTE(review): assumes the /infer endpoint returns (filepath, seed) -- verify
    image = Image.open(result[0])
    imagename = datetime.datetime.strftime(datetime.datetime.now(),'IMage_%Y-%m-%d_%H-%M-%S.png')
    image.save(imagename)
    print(f'Image saved as {imagename}...')
    return image, imagename
|
93 |
+
|
94 |
+
def openDIR():
    """
    Open the current working directory in the Windows file manager.

    Side effect only; returns nothing. Windows-specific.
    """
    current_directory = os.getcwd()
    print("Current Directory:", current_directory)
    # Fix: use os.startfile instead of building a shell string for
    # os.system -- no shell involved, and it sidesteps cmd.exe's
    # `start` quoting pitfalls with paths containing spaces.
    os.startfile(current_directory)
|
99 |
+
|
100 |
+
############# TWEET GENERATION #########################
|
101 |
+
def createTweets(token,bruteText):
    """
    Turn a newsletter body into three tweets with the eswardivi/phi-4 Space.

    token     -> str, Hugging Face API token
    bruteText -> str, the newsletter/article body
    Return    -> (str, str, str) the three tweets, in order
    """
    Tweet_prompt = f"Read the following newsletter. rewrite it into 3 twitter posts in English, in progression.\n---\n{bruteText}"
    # using https://huggingface.co/spaces/eswardivi/phi-4
    client = Client("eswardivi/phi-4",hf_token=token)
    result = client.predict(
        message=Tweet_prompt,
        param_2=0.7,    # presumably temperature -- names come from the Space's auto API; verify
        param_3=True,   # presumably a sampling flag -- verify against the Space
        param_4=512,    # presumably max new tokens -- verify against the Space
        api_name="/chat"
    )
    print(result)
    # Fix: the original re-imported rich.console.Console and rebuilt
    # `console` twice inside this function; the module-level console
    # is reused instead.
    # NOTE(review): parsing assumes the model labels posts as '...1:**' etc.
    # and separates them with blank lines -- fragile; raises IndexError if
    # the model answers in another format.
    tweet1 = result.split('1:**')[1].split('\n\n')[0]
    tweet2 = result.split('2:**')[1].split('\n\n')[0]
    tweet3 = result.split('3:**')[1]
    console.print(tweet1)
    console.rule()
    console.print(tweet2)
    console.rule()
    console.print(tweet3)
    console.rule()
    return tweet1,tweet2, tweet3
|
127 |
+
|
128 |
+
#OR
|
129 |
+
def createTweets2(token,bruteText):
    """
    Alternative tweet generator using the Qwen/Qwen2.5-72B-Instruct Space.

    token     -> str, Hugging Face API token
    bruteText -> str, the newsletter/article body
    Return    -> str, the raw model reply containing all three posts
    """
    # Using https://huggingface.co/spaces/Qwen/Qwen2.5-72B-Instruct
    Tweet_prompt = f"Read the following newsletter. rewrite it into 3 twitter posts in English, in progression.\n---\n{bruteText}"
    qwen = Client("Qwen/Qwen2.5-72B-Instruct",hf_token=token)
    reply = qwen.predict(
        query=Tweet_prompt,
        history=[],
        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
        api_name="/model_chat"
    )
    # Last assistant message in the chat history returned by the Space
    twitposts = reply[1][0][1]
    console.print(twitposts)

    # Slice the reply into its three labelled posts and echo each one
    first = twitposts.split('Post 1:')[1].split('\n\n')[0]
    second = twitposts.split('Post 2:')[1].split('\n\n')[0]
    third = twitposts.split('Post 3:')[1]
    for piece in (first, second, third):
        console.print(piece)
        console.rule()
    return twitposts
|
152 |
+
|
153 |
+
|
154 |
+
# UI layout: everything except the token row starts hidden; a validated
# HF token (checkHFT) reveals rows 1-4.
with gr.Blocks(fill_width=True,theme=theme) as demo:
    # INTERFACE
    # Top banner: logo | title + status line | token entry and validate button
    with gr.Row(variant='panel'):
        with gr.Column(scale=2):
            gr.Image('gradioLOGO.png',width=260)
        with gr.Column(scale=4):
            gr.HTML(
            f"""<h1 style="text-align:center">Advanced POST creation with GRADIO and HF API</h1>""")
            # Status text rewritten by checkHFT after each validation attempt
            alertTEXT = gr.Text("⚠️✅You don't have a Hugging Face Token set",container=False,show_label=False,)
        with gr.Column(scale=2):
            TOKEN = gr.Textbox(lines=1,label='Your HF token',scale=1)
            btn_token = gr.Button("Validate HF token", variant='secondary',size='lg',scale=1)


    # Row 1: action buttons + article-header input (hidden until token is valid)
    with gr.Row(visible=False) as row1:
        #HYPERPARAMETERS
        with gr.Column(scale=1):
            CREATE_SDP = gr.Button(variant='huggingface',value='Generate Prompt')
            GEN_IMAGE = gr.Button(value='Generate Image',variant='primary')
            gr.Markdown('---')
            OPEN_FOLDER = gr.Button(variant='secondary',value='Open Image Folder')
            clear = gr.ClearButton()
        #CHATBOT AREA
        with gr.Column(scale=3):
            headers = gr.Textbox(lines=8,label='Header of the Article')

    # Row 2: generated SD prompt / saved filename / rendered image
    with gr.Row(visible=False) as row2:
        with gr.Column(scale=2):
            SDPrompt = gr.Textbox(lines=8,label='Generated prompt Stable Diffusion')
            ImageFilename = gr.Textbox(lines=2,label='Generated Image Filename',show_copy_button=True)
        with gr.Column(scale=3):
            SDImage = gr.Image(type='pil',label='Generated Image',show_download_button=True, show_fullscreen_button=True,)

    # Row 3: visual separator between the image and tweet sections
    with gr.Row(visible=False) as row3:
        gr.Markdown('---')

    # Row 4: article body input + the three generated tweets
    with gr.Row(visible=False) as row4:
        #TWITTERPOSTS CREATION SECTION
        with gr.Column(scale=2):
            body = gr.Textbox(lines=12,label='Body of the Article')
            CREATE_TWEET = gr.Button(variant='huggingface',value='Generate Tweets')
        #TWEET RESULTS AREA
        with gr.Column(scale=1):
            tweets1 = gr.Textbox(lines=5,label='🐦 TWEET #1 - 1️⃣',show_copy_button=True)
            tweets2 = gr.Textbox(lines=5,label='🐦 TWEET #2 - 2️⃣',show_copy_button=True)
            tweets3 = gr.Textbox(lines=5,label='🐦 TWEET #3 - 3️⃣',show_copy_button=True)

    # Event wiring: buttons -> backend functions
    CREATE_SDP.click(createSDPrompt, [TOKEN,headers], [SDPrompt])
    GEN_IMAGE.click(CreateImage, [TOKEN,SDPrompt], [SDImage,ImageFilename]) #CreateImage
    OPEN_FOLDER.click(openDIR, [], []) #Open Current directory
    CREATE_TWEET.click(createTweets,[TOKEN,body],[tweets1,tweets2,tweets3])
    btn_token.click(checkHFT,[TOKEN],[row1,row2,row3,row4,alertTEXT])
|
206 |
+
|
207 |
+
|
208 |
+
|
209 |
+
# Launch the Gradio app only when run as a script
if __name__ == "__main__":
    demo.launch()
|