meraj12 committed on
Commit
abad73f
·
verified ·
1 Parent(s): 1e1cbf3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -83
app.py CHANGED
@@ -1,84 +1,92 @@
 
1
  import streamlit as st
2
- import os
3
- import cv2
4
- import numpy as np
5
- from PIL import Image
6
- from rembg import remove
7
- from realesrgan import RealESRGAN
8
- from moviepy.editor import ImageSequenceClip
9
-
10
# --- UI: uploads and effect selection (top-level Streamlit script) ---
st.title("🖼️ AI Image & Video Effects Studio")
st.markdown("Upload multiple images, apply cool effects, and export a video!")

# Multiple images may be uploaded; each is processed independently below.
uploaded_files = st.file_uploader("Upload images", type=["png", "jpg", "jpeg"], accept_multiple_files=True)

# Select effect to apply to every uploaded image.
effect = st.selectbox("Choose an effect to apply", ["None", "Cartoon", "Edge Detection", "Remove Background", "Face Enhancement"])

# Processed frames are written here and later stitched into a slideshow video.
output_dir = "outputs"
os.makedirs(output_dir, exist_ok=True)

# Paths of saved processed images, in upload order.
processed_images = []
24
def cartoonize(image):
    """Apply a cartoon effect: smoothed colors masked by adaptive-threshold edges."""
    # Build an edge mask from a median-blurred grayscale copy.
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grayscale = cv2.medianBlur(grayscale, 5)
    edge_mask = cv2.adaptiveThreshold(
        grayscale, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9
    )
    # Flatten color regions while keeping boundaries sharp, then keep only
    # the pixels the edge mask lets through.
    smoothed = cv2.bilateralFilter(image, 9, 300, 300)
    return cv2.bitwise_and(smoothed, smoothed, mask=edge_mask)
32
-
33
def edge_detect(image):
    """Return a 3-channel BGR rendering of Canny edges found in `image`."""
    edges = cv2.Canny(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 100, 200)
    # Expand the single-channel edge map back to BGR so callers can treat
    # every effect's output uniformly.
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
37
-
38
def enhance_face(image_pil):
    """Upscale a PIL image 2x with Real-ESRGAN on CPU and return the result.

    Note: the weights are reloaded from ``RealESRGAN_x2.pth`` on every call;
    callers processing many images may want to cache the constructed model.
    """
    # FIX: dropped the redundant function-local `from realesrgan import
    # RealESRGAN` — the same name is already imported at module level, and
    # the local import merely shadowed it.
    model = RealESRGAN("cpu", scale=2)
    model.load_weights("RealESRGAN_x2.pth")
    return model.predict(image_pil)
43
-
44
def save_video(images, filename="output_video.mp4"):
    """Write `images` (a list of frame file paths) to an H.264 MP4 at 1 fps."""
    slideshow = ImageSequenceClip(images, fps=1)
    slideshow.write_videofile(filename, codec="libx264")
    return filename
48
-
49
# Main processing loop: run the selected effect on each upload, preview it,
# save it under `output_dir`, then offer video export and per-image downloads.
if uploaded_files:
    for file in uploaded_files:
        img = Image.open(file).convert("RGB")
        np_img = np.array(img)
        np_img_bgr = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)

        if effect == "Cartoon":
            result = cartoonize(np_img_bgr)
        elif effect == "Edge Detection":
            result = edge_detect(np_img_bgr)
        elif effect == "Remove Background":
            # NOTE(review): `remove` receives the RGB array, but the BGR2RGB
            # conversion below assumes BGR input — channels may come out
            # swapped, and rembg can return a 4-channel RGBA array; verify
            # this branch.
            result = remove(np_img)
        elif effect == "Face Enhancement":
            # NOTE(review): enhance_face returns a PIL image, yet
            # Image.fromarray below expects a numpy array — this branch
            # looks like it would raise at runtime; verify.
            result = enhance_face(img)
        else:
            result = np_img_bgr

        # ndarray results are assumed BGR and converted for display; non-array
        # results are passed through unchanged.
        result_rgb = cv2.cvtColor(result, cv2.COLOR_BGR2RGB) if isinstance(result, np.ndarray) else result
        final_img = Image.fromarray(result_rgb)

        st.image(final_img, caption="Processed Image", use_column_width=True)

        # Persist the processed frame so it can be downloaded and stitched
        # into the slideshow video.
        img_path = os.path.join(output_dir, f"{file.name}")
        final_img.save(img_path)
        processed_images.append(img_path)

    # Convert the saved frames to a 1-fps slideshow video on demand.
    if st.button("🎞️ Convert to Video Slideshow"):
        video_path = save_video(processed_images)
        st.video(video_path)

    # One download button per processed image.
    for img_path in processed_images:
        with open(img_path, "rb") as f:
            st.download_button("Download Image", f, file_name=os.path.basename(img_path))
 
 
 
 
 
 
 
 
# app.py
import io

import streamlit as st
import torch
from diffusers import StableDiffusionPipeline
# The pipeline is cached so Streamlit reruns reuse a single loaded instance
# instead of reloading the model on every interaction.
@st.cache_resource
def load_model():
    """Load the Stable Diffusion v1.4 pipeline configured for CPU inference."""
    pipeline = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        torch_dtype=torch.float32,
        use_safetensors=True,
        safety_checker=None,
    )
    pipeline = pipeline.to("cpu")
    # Attention slicing trades a little speed for a smaller memory footprint.
    pipeline.enable_attention_slicing()
    return pipeline
def main():
    """Streamlit entry point: collect a text prompt and render a generated image.

    Uses the module-level `io` import for the PNG download buffer and
    `load_model()` for the cached Stable Diffusion pipeline.
    """
    st.set_page_config(page_title="Poetry to Image", page_icon="🎨")

    st.title("✨ Romantic Text to Image Converter")
    st.markdown("Turn your poetic words into beautiful images (CPU version)")

    # Session state survives Streamlit reruns, so results persist between clicks.
    if 'generated_image' not in st.session_state:
        st.session_state.generated_image = None
    if 'error' not in st.session_state:
        st.session_state.error = None

    # Sidebar with generation settings.
    with st.sidebar:
        st.header("Settings")
        num_steps = st.slider("Inference Steps", 10, 50, 25)
        guidance_scale = st.slider("Guidance Scale", 3.0, 15.0, 7.5)

    # Main interface.
    prompt = st.text_area(
        "Enter your romantic text or poetry:",
        height=150,
        placeholder="Example: 'Your eyes sparkle like stars in the night sky...'"
    )

    col1, col2 = st.columns([1, 3])
    with col1:
        if st.button("Generate Image", type="primary"):
            # Clear previous results before starting a new generation.
            st.session_state.generated_image = None
            st.session_state.error = None

            if not prompt.strip():
                st.session_state.error = "Please enter some text first!"
            else:
                try:
                    with st.spinner("Creating your artwork... (this may take a few minutes)"):
                        pipe = load_model()
                        image = pipe(
                            prompt,
                            num_inference_steps=num_steps,
                            guidance_scale=guidance_scale
                        ).images[0]

                        st.session_state.generated_image = image
                except Exception as e:
                    # Surface the failure in the UI instead of crashing the app.
                    st.session_state.error = f"Error: {str(e)}"

    # Display results.
    if st.session_state.error:
        st.error(st.session_state.error)

    # BUG FIX: compare against None explicitly — relying on the truthiness of
    # a PIL Image object is not a reliable presence test.
    if st.session_state.generated_image is not None:
        st.subheader("Generated Image")
        st.image(
            st.session_state.generated_image,
            # NOTE(review): use_column_width is deprecated in newer Streamlit;
            # confirm the deployed version before switching to use_container_width.
            use_column_width=True,
            caption="Your generated artwork"
        )

        # Encode the PIL image to PNG bytes for the download button.
        # BUG FIX: `io` was used here without ever being imported (NameError
        # at runtime); it is now imported at the top of the file.
        buf = io.BytesIO()
        st.session_state.generated_image.save(buf, format="PNG")
        byte_im = buf.getvalue()

        st.download_button(
            label="Download Image",
            data=byte_im,
            file_name="poetry_image.png",
            mime="image/png"
        )


if __name__ == "__main__":
    main()