Commit · f2a04c1
Parent(s): f82de34
[Update]: Refined app.py for enhanced GPU processing and memory operation management 🌊

- Added: GPU acceleration to the `process_memory_operation` function for improved performance.
- Updated: Error handling to gracefully fall back to the CPU in case of GPU memory issues.
- Improved: Visualization and metrics formatting for better clarity in results.
- Added: New function `generate_art_with_gpu` to streamline artistic generation using the GPU.
- Pro Tip of the Commit: When the waves get heavy, let's keep our GPUs cool and our operations smooth! 🌊❄️

Aye, Aye! 🚢
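For readers new to ZeroGPU Spaces, the pattern this commit applies is sketched below: `@spaces.GPU` requests a GPU only for the duration of the call, the work moves to CUDA inside the function, and `torch.cuda.OutOfMemoryError` triggers a CPU retry. This is a minimal illustration under those assumptions, not code from app.py; `heavy_op` and its tensor math are hypothetical stand-ins.

```python
import spaces  # Hugging Face ZeroGPU helper that provides the @spaces.GPU decorator
import torch

@spaces.GPU  # a GPU is allocated only while this call runs
def heavy_op(x: torch.Tensor) -> torch.Tensor:
    """Hypothetical GPU-accelerated operation with a graceful CPU fallback."""
    try:
        x = x.to("cuda")             # move the work onto the GPU
        return (x @ x.T).to("cpu")   # compute, hand the result back on the CPU
    except torch.cuda.OutOfMemoryError:
        print("⚠️ GPU out of memory - falling back to CPU")
        x = x.to("cpu")              # retry the same computation on the CPU
        return x @ x.T
```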
app.py CHANGED
@@ -728,107 +728,133 @@ class MemoryWave:
 
         return filepath
 
-        …  # (~25 removed lines of the old method not shown in the page capture)
-        if operation == "wave_memory":
-            result = self.generate_wave_memory(emotion_valence)
-            wave_title = "Wave Memory Pattern"
-            wave_data = result["wave"]
-
-        elif operation == "interference":
-            result = self.generate_interference_pattern(emotion_valence)
-            wave_title = "Interference Pattern"
-            wave_data = result["wave"]
-
-        elif operation == "resonance":
-            result = self.generate_resonance_pattern(emotion_valence)
-            wave_title = "Resonance Pattern"
-            wave_data = result["wave"]
-
-        elif operation == "reconstruction":
-            result = self.generate_memory_reconstruction(emotion_valence)
-            wave_title = "Memory Reconstruction"
-            wave_data = result["reconstructed"]
-
-        elif operation == "hot_tub":
-            result = self.generate_hot_tub_simulation(
-                emotion_valence, comfort_level, exploration_depth
-            )
-            wave_title = "Hot Tub Exploration"
-            wave_data = result["safe_result"]
-
-        # Create visualizations
-        wave_plot = self.visualize_wave_pattern(wave_data, wave_title)
-        emotion_plot = self.visualize_emotional_history()
-
-        # Generate artistic visualization if requested
-        art_output = None
-        if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
-            prompt = generate_memory_prompt(operation, emotion_valence)
-            generator = torch.Generator().manual_seed(seed)
-            art_output = pipe(
-                prompt=prompt,
-                negative_prompt="text, watermark, signature, blurry, distorted",
-                guidance_scale=1.5,
-                num_inference_steps=8,
-                width=768,
-                height=768,
-                generator=generator,
-            ).images[0]
-
-        # Format metrics for display
-        metrics = result["metrics"]
-        metrics_str = "📊 Analysis Results:\n\n"
-        for key, value in metrics.items():
-            metrics_str += f"• {key.replace('_', ' ').title()}: {value:.4f}\n"
-
-        …  # (surrounding removed lines not shown in the page capture)
-            if pipe is not None:
-                pipe.to("cpu")
-            return self.process_memory_operation(
-                operation, emotion_valence, grid_size,
-                comfort_level, exploration_depth,
-                generate_art, seed
-            )
+@spaces.GPU
+def process_memory_operation(
+    memory_wave: MemoryWave,
+    operation: str,
+    emotion_valence: float,
+    grid_size: int = DEFAULT_GRID_SIZE,
+    comfort_level: float = 0.8,
+    exploration_depth: float = 0.5,
+    generate_art: bool = True,
+    seed: int = 42
+) -> Tuple[str, go.Figure, go.Figure, Optional[np.ndarray]]:
+    """Process memory operations with GPU acceleration."""
+    try:
+        # Move to GPU
+        memory_wave.to("cuda")
+
+        # Resize grid if needed
+        if grid_size != memory_wave.size:
+            memory_wave.__init__(size=grid_size, device="cuda")
+
+        # Process based on operation type
+        if operation == "wave_memory":
+            result = memory_wave.generate_wave_memory(emotion_valence)
+            wave_title = "Wave Memory Pattern"
+            wave_data = result["wave"]
+
+        elif operation == "interference":
+            result = memory_wave.generate_interference_pattern(emotion_valence)
+            wave_title = "Interference Pattern"
+            wave_data = result["wave"]
+
+        elif operation == "resonance":
+            result = memory_wave.generate_resonance_pattern(emotion_valence)
+            wave_title = "Resonance Pattern"
+            wave_data = result["wave"]
+
+        elif operation == "reconstruction":
+            result = memory_wave.generate_memory_reconstruction(emotion_valence)
+            wave_title = "Memory Reconstruction"
+            wave_data = result["reconstructed"]
+
+        elif operation == "hot_tub":
+            result = memory_wave.generate_hot_tub_simulation(
+                emotion_valence, comfort_level, exploration_depth
+            )
+            wave_title = "Hot Tub Exploration"
+            wave_data = result["safe_result"]
+
+        # Create visualizations
+        wave_plot = memory_wave.visualize_wave_pattern(wave_data, wave_title)
+        emotion_plot = memory_wave.visualize_emotional_history()
+
+        # Generate artistic visualization if requested
+        art_output = None
+        if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
+            prompt = generate_memory_prompt(operation, emotion_valence)
+            art_output = generate_art_with_gpu(prompt, seed)
+
+        # Format metrics for display
+        metrics = result["metrics"]
+        metrics_str = "📊 Analysis Results:\n\n"
+        for key, value in metrics.items():
+            metrics_str += f"• {key.replace('_', ' ').title()}: {value:.4f}\n"
+
+        metrics_str += f"\n🌊 Emotional Context:\n"
+        metrics_str += f"• Valence: {result['emotion']['valence']:.2f}\n"
+        metrics_str += f"• Arousal: {result['emotion']['arousal']:.2f}\n"
+
+        # Save memory snapshot
+        snapshot_path = memory_wave.save_memory_snapshot(operation)
+        metrics_str += f"\n💾 Memory snapshot saved: {snapshot_path}"
+
+        # Move back to CPU
+        memory_wave.to("cpu")
+
+        return metrics_str, wave_plot, emotion_plot, art_output
+
+    except torch.cuda.OutOfMemoryError:
+        print("⚠️ GPU out of memory - falling back to CPU")
+        memory_wave.to("cpu")
+        if pipe is not None:
+            pipe.to("cpu")
+        return process_memory_operation(
+            memory_wave, operation, emotion_valence, grid_size,
+            comfort_level, exploration_depth, generate_art, seed
+        )
+
+    except Exception as e:
+        print(f"❌ Error during processing: {e}")
+        # Ensure we're back on CPU
+        memory_wave.to("cpu")
+        return None, None, None, None
+
+@spaces.GPU
+def generate_art_with_gpu(prompt: str, seed: int = 42) -> Optional[np.ndarray]:
+    """Generate art using Stable Diffusion with GPU acceleration."""
+    if not STABLE_DIFFUSION_AVAILABLE or pipe is None:
+        return None
+
+    try:
+        # Move to GPU and optimize
+        pipe.to("cuda")
+        pipe.enable_model_cpu_offload()
+        pipe.enable_vae_slicing()
+        pipe.enable_vae_tiling()
+        pipe.enable_attention_slicing(slice_size="max")
+
+        # Generate image
+        generator = torch.Generator().manual_seed(seed)
+        image = pipe(
+            prompt=prompt,
+            negative_prompt="text, watermark, signature, blurry, distorted",
+            guidance_scale=1.5,
+            num_inference_steps=8,
+            width=768,
+            height=768,
+            generator=generator,
+        ).images[0]
+
+        # Move back to CPU
+        pipe.to("cpu")
+        return image
+
+    except Exception as e:
+        print(f"❌ Error generating art: {e}")
+        pipe.to("cpu")
+        return None
 
 def generate_memory_prompt(operation: str, emotion_valence: float) -> str:
     """Generate an artistic prompt based on the memory operation and emotional context."""
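A quick usage sketch of the new helper follows; the prompt and filename are illustrative, not from the commit. Two details worth noting: `StableDiffusionPipeline` returns a `PIL.Image` by default, so the `Optional[np.ndarray]` annotation is looser than what the function actually yields, and `enable_model_cpu_offload()` manages device placement itself, so pairing it with explicit `pipe.to("cuda")` calls may conflict in some diffusers versions.

```python
# Hypothetical smoke test for generate_art_with_gpu (names from the diff above).
image = generate_art_with_gpu(
    "a serene memory wave rendered as flowing light, digital art", seed=7
)
if image is not None:
    image.save("wave_art.png")  # works because diffusers returns a PIL.Image
```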
@@ -955,7 +981,7 @@ def create_interface():
 
         # Set up event handlers
         run_btn.click(
-            …  # (old handler reference not shown in the page capture)
+            process_memory_operation,  # Use the standalone function
             inputs=[
                 operation_input,
                 emotion_input,
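Since `process_memory_operation` is now a standalone function whose first parameter is a `MemoryWave` instance, the click handler has to supply that instance somehow, and the page capture truncates the `inputs` list before showing how. One plausible wiring, assuming a module-level instance and placeholder component names (the real interface also passes grid size, comfort level, exploration depth, the art toggle, and the seed), is to pre-bind the instance with `functools.partial`:

```python
from functools import partial

import gradio as gr

memory_wave = MemoryWave()  # assumed module-level instance; constructor args may differ

with gr.Blocks() as demo:
    operation_input = gr.Dropdown(
        ["wave_memory", "interference", "resonance", "reconstruction", "hot_tub"],
        value="wave_memory", label="Operation",
    )
    emotion_input = gr.Slider(-1.0, 1.0, value=0.0, label="Emotional Valence")
    run_btn = gr.Button("Run")
    metrics_out = gr.Textbox(label="Metrics")
    wave_out, emotion_out = gr.Plot(), gr.Plot()
    art_out = gr.Image()

    # Pre-bind the MemoryWave instance so Gradio only passes UI values.
    run_btn.click(
        partial(process_memory_operation, memory_wave),
        inputs=[operation_input, emotion_input],
        outputs=[metrics_out, wave_out, emotion_out, art_out],
    )
```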