Falln87 committed
Commit c2cd1c7 · verified · 1 Parent(s): e800547

Create app.py

Files changed (1)
1. app.py +89 -0
app.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from diffusers import StableDiffusionPipeline, DiffusionPipeline
+ from diffusers.utils import export_to_video
+ from huggingface_hub import HfApi
+
+ # Set up the Hugging Face Hub API client
+ api = HfApi()
+
+ # Use a GPU when one is available; fall back to CPU otherwise
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # Load a causal language model and its tokenizer for text generation
+ def load_language_model(model_name):
+     model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     return model, tokenizer
+
+ # Generate a continuation of the prompt with the selected language model.
+ # The click handler passes the model name and the prompt; generation uses
+ # model.generate(), since raw logits cannot be decoded back into text.
+ def generate_text(model_name, prompt):
+     model, tokenizer = load_language_model(model_name)
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     outputs = model.generate(**inputs, max_new_tokens=128)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Generate an image with Stable Diffusion
+ def generate_image(prompt, model_name):
+     pipe = StableDiffusionPipeline.from_pretrained(model_name).to(device)
+     image = pipe(prompt, num_inference_steps=50).images[0]
+     return image
+
+ # Generate video or music with other diffusion pipelines. Text-to-video
+ # pipelines return frames, exported here to an .mp4 for Gradio's Video
+ # component; audio pipelines return waveforms, whose sample rate is
+ # model-dependent (16000 Hz is assumed here).
+ def generate_media(prompt, model_name, media_type):
+     pipe = DiffusionPipeline.from_pretrained(model_name).to(device)
+     result = pipe(prompt, num_inference_steps=50)
+     if media_type == "video":
+         return export_to_video(result.frames[0]), None
+     return None, (16000, result.audios[0])
+
+ # Create a Gradio interface
+ with gr.Blocks() as demo:
+     with gr.Tab("Chat"):
+         with gr.Row():
+             language_model_input = gr.Textbox(label="Language Model")
+             query_button = gr.Button("Query HuggingFace Hub")
+         chat_input = gr.Textbox(label="Chat Input")
+         chat_output = gr.Textbox(label="Chat Output")
+         generate_button = gr.Button("Generate Text")
+
+     with gr.Tab("Image Generation"):
+         image_input = gr.Textbox(label="Image Prompt")
+         image_model_input = gr.Textbox(label="Image Model")
+         generate_image_button = gr.Button("Generate Image")
+         image_output = gr.Image(label="Generated Image")
+
+     with gr.Tab("Media Generation"):
+         media_input = gr.Textbox(label="Media Prompt")
+         media_model_input = gr.Textbox(label="Media Model")
+         media_type_input = gr.Radio(label="Media Type", choices=["video", "music"])
+         generate_media_button = gr.Button("Generate Media")
+         # Both output widgets are created up front; generate_media returns a
+         # value for the one matching the selected media type, None for the other.
+         video_output = gr.Video(label="Generated Media")
+         audio_output = gr.Audio(label="Generated Media")
+
+     # Query the Hugging Face Hub for language models; list_models returns
+     # ModelInfo objects, so join a few of their ids into a string the
+     # Textbox can display.
+     def query_models(filter_text):
+         return "\n".join(model.id for model in api.list_models(filter=filter_text, limit=10))
+
+     query_button.click(fn=query_models, inputs=language_model_input, outputs=language_model_input)
+
+     # Generate text with a language model
+     generate_button.click(fn=generate_text, inputs=[language_model_input, chat_input], outputs=chat_output)
+
+     # Generate an image with Stable Diffusion
+     generate_image_button.click(fn=generate_image, inputs=[image_input, image_model_input], outputs=image_output)
+
+     # Generate video or music with other diffusion models
+     generate_media_button.click(fn=generate_media, inputs=[media_input, media_model_input, media_type_input], outputs=[video_output, audio_output])
+
+ demo.launch()
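
For the Space to build, a requirements.txt along these lines would also be needed; the package list is inferred from the imports above (accelerate is an assumption, and no version pins are verified):

gradio
torch
transformers
diffusers
accelerate
huggingface_hub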