Spaces:
Sleeping
Sleeping
Commit
·
d8cb7f3
1
Parent(s):
4c38863
[Update]: Enhanced app.py for improved GPU handling and memory operation processing 🚀
Browse files
- Added: Dynamic GPU management to ensure operations run smoothly on available devices.
- Updated: MemoryWave class to streamline memory operation processing with enhanced error handling and visualization features.
- Improved: Gradio interface to directly utilize the MemoryWave class method for processing, enhancing clarity and performance.
- Pro Tip of the Commit: When it comes to memory operations, let's keep the waves flowing and the devices dancing! 🌊🚀
Aye, Aye! 🚢
app.py
CHANGED
@@ -764,72 +764,95 @@ class MemoryWave:
|
|
764 |
) -> Tuple[str, go.Figure, go.Figure, Optional[np.ndarray]]:
|
765 |
"""Process a memory operation and return visualizations."""
|
766 |
|
767 |
-
#
|
768 |
-
|
769 |
-
|
770 |
-
|
771 |
-
|
772 |
-
|
773 |
-
|
774 |
-
|
775 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
776 |
|
777 |
-
|
778 |
-
|
779 |
-
|
780 |
-
wave_data = result["wave"]
|
781 |
|
782 |
-
|
783 |
-
|
784 |
-
|
785 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
786 |
|
787 |
-
|
788 |
-
|
789 |
-
|
790 |
-
|
|
|
791 |
|
792 |
-
|
793 |
-
|
794 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
795 |
)
|
796 |
-
|
797 |
-
|
798 |
-
|
799 |
-
|
800 |
-
wave_plot = self.visualize_wave_pattern(wave_data, wave_title)
|
801 |
-
emotion_plot = self.visualize_emotional_history()
|
802 |
-
|
803 |
-
# Generate artistic visualization if requested
|
804 |
-
art_output = None
|
805 |
-
if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
|
806 |
-
prompt = generate_memory_prompt(operation, emotion_valence)
|
807 |
-
generator = torch.Generator().manual_seed(seed)
|
808 |
-
art_output = pipe(
|
809 |
-
prompt=prompt,
|
810 |
-
negative_prompt="text, watermark, signature, blurry, distorted",
|
811 |
-
guidance_scale=1.5,
|
812 |
-
num_inference_steps=8,
|
813 |
-
width=768,
|
814 |
-
height=768,
|
815 |
-
generator=generator,
|
816 |
-
).images[0]
|
817 |
-
|
818 |
-
# Format metrics for display
|
819 |
-
metrics = result["metrics"]
|
820 |
-
metrics_str = "📊 Analysis Results:\n\n"
|
821 |
-
for key, value in metrics.items():
|
822 |
-
metrics_str += f"• {key.replace('_', ' ').title()}: {value:.4f}\n"
|
823 |
-
|
824 |
-
metrics_str += f"\n🌊 Emotional Context:\n"
|
825 |
-
metrics_str += f"• Valence: {result['emotion']['valence']:.2f}\n"
|
826 |
-
metrics_str += f"• Arousal: {result['emotion']['arousal']:.2f}\n"
|
827 |
-
|
828 |
-
# Save memory snapshot
|
829 |
-
snapshot_path = self.save_memory_snapshot(operation)
|
830 |
-
metrics_str += f"\n💾 Memory snapshot saved: {snapshot_path}"
|
831 |
-
|
832 |
-
return metrics_str, wave_plot, emotion_plot, art_output
|
833 |
|
834 |
def to(self, device_str):
|
835 |
"""Move the wave system to a different device."""
|
@@ -887,31 +910,6 @@ def create_interface():
|
|
887 |
device_str = get_device()
|
888 |
memory_wave = MemoryWave(device_str=device_str)
|
889 |
|
890 |
-
def process_with_gpu(*args, **kwargs):
|
891 |
-
"""Process operations on GPU with optimized memory handling."""
|
892 |
-
device_str = get_device()
|
893 |
-
|
894 |
-
try:
|
895 |
-
# Ensure we're on GPU if available
|
896 |
-
if device_str == "cuda" and memory_wave.device != "cuda":
|
897 |
-
memory_wave.to("cuda")
|
898 |
-
if pipe is not None and pipe.device.type != "cuda":
|
899 |
-
pipe.to("cuda")
|
900 |
-
|
901 |
-
result = memory_wave.process_memory_operation(*args, **kwargs)
|
902 |
-
return result
|
903 |
-
|
904 |
-
except torch.cuda.OutOfMemoryError:
|
905 |
-
print("⚠️ GPU out of memory - falling back to CPU")
|
906 |
-
memory_wave.to("cpu")
|
907 |
-
if pipe is not None:
|
908 |
-
pipe.to("cpu")
|
909 |
-
return memory_wave.process_memory_operation(*args, **kwargs)
|
910 |
-
|
911 |
-
except Exception as e:
|
912 |
-
print(f"❌ Error during processing: {e}")
|
913 |
-
return None
|
914 |
-
|
915 |
# Create the interface
|
916 |
with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue")) as demo:
|
917 |
gr.Markdown("""
|
@@ -994,7 +992,7 @@ def create_interface():
|
|
994 |
|
995 |
# Set up event handlers
|
996 |
run_btn.click(
|
997 |
-
|
998 |
inputs=[
|
999 |
operation_input,
|
1000 |
emotion_input,
|
@@ -1039,6 +1037,7 @@ if __name__ == "__main__":
|
|
1039 |
demo.launch(
|
1040 |
server_name="0.0.0.0", # Listen on all interfaces
|
1041 |
server_port=7860, # Default Spaces port
|
1042 |
-
share=
|
1043 |
-
show_api=False # Hide API docs
|
|
|
1044 |
)
|
|
|
764 |
) -> Tuple[str, go.Figure, go.Figure, Optional[np.ndarray]]:
|
765 |
"""Process a memory operation and return visualizations."""
|
766 |
|
767 |
+
# Ensure we're on GPU if available
|
768 |
+
device_str = get_device()
|
769 |
+
if device_str == "cuda" and self.device != "cuda":
|
770 |
+
self.to("cuda")
|
771 |
+
if pipe is not None and pipe.device.type != "cuda":
|
772 |
+
pipe.to("cuda")
|
773 |
+
|
774 |
+
try:
|
775 |
+
# Resize grid if needed
|
776 |
+
if grid_size != self.size:
|
777 |
+
self.__init__(size=grid_size, device=self.device)
|
778 |
+
|
779 |
+
# Process based on operation type
|
780 |
+
if operation == "wave_memory":
|
781 |
+
result = self.generate_wave_memory(emotion_valence)
|
782 |
+
wave_title = "Wave Memory Pattern"
|
783 |
+
wave_data = result["wave"]
|
784 |
+
|
785 |
+
elif operation == "interference":
|
786 |
+
result = self.generate_interference_pattern(emotion_valence)
|
787 |
+
wave_title = "Interference Pattern"
|
788 |
+
wave_data = result["wave"]
|
789 |
+
|
790 |
+
elif operation == "resonance":
|
791 |
+
result = self.generate_resonance_pattern(emotion_valence)
|
792 |
+
wave_title = "Resonance Pattern"
|
793 |
+
wave_data = result["wave"]
|
794 |
+
|
795 |
+
elif operation == "reconstruction":
|
796 |
+
result = self.generate_memory_reconstruction(emotion_valence)
|
797 |
+
wave_title = "Memory Reconstruction"
|
798 |
+
wave_data = result["reconstructed"]
|
799 |
+
|
800 |
+
elif operation == "hot_tub":
|
801 |
+
result = self.generate_hot_tub_simulation(
|
802 |
+
emotion_valence, comfort_level, exploration_depth
|
803 |
+
)
|
804 |
+
wave_title = "Hot Tub Exploration"
|
805 |
+
wave_data = result["safe_result"]
|
806 |
|
807 |
+
# Create visualizations
|
808 |
+
wave_plot = self.visualize_wave_pattern(wave_data, wave_title)
|
809 |
+
emotion_plot = self.visualize_emotional_history()
|
|
|
810 |
|
811 |
+
# Generate artistic visualization if requested
|
812 |
+
art_output = None
|
813 |
+
if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
|
814 |
+
prompt = generate_memory_prompt(operation, emotion_valence)
|
815 |
+
generator = torch.Generator().manual_seed(seed)
|
816 |
+
art_output = pipe(
|
817 |
+
prompt=prompt,
|
818 |
+
negative_prompt="text, watermark, signature, blurry, distorted",
|
819 |
+
guidance_scale=1.5,
|
820 |
+
num_inference_steps=8,
|
821 |
+
width=768,
|
822 |
+
height=768,
|
823 |
+
generator=generator,
|
824 |
+
).images[0]
|
825 |
|
826 |
+
# Format metrics for display
|
827 |
+
metrics = result["metrics"]
|
828 |
+
metrics_str = "📊 Analysis Results:\n\n"
|
829 |
+
for key, value in metrics.items():
|
830 |
+
metrics_str += f"• {key.replace('_', ' ').title()}: {value:.4f}\n"
|
831 |
|
832 |
+
metrics_str += f"\n🌊 Emotional Context:\n"
|
833 |
+
metrics_str += f"• Valence: {result['emotion']['valence']:.2f}\n"
|
834 |
+
metrics_str += f"• Arousal: {result['emotion']['arousal']:.2f}\n"
|
835 |
+
|
836 |
+
# Save memory snapshot
|
837 |
+
snapshot_path = self.save_memory_snapshot(operation)
|
838 |
+
metrics_str += f"\n💾 Memory snapshot saved: {snapshot_path}"
|
839 |
+
|
840 |
+
return metrics_str, wave_plot, emotion_plot, art_output
|
841 |
+
|
842 |
+
except torch.cuda.OutOfMemoryError:
|
843 |
+
print("⚠️ GPU out of memory - falling back to CPU")
|
844 |
+
self.to("cpu")
|
845 |
+
if pipe is not None:
|
846 |
+
pipe.to("cpu")
|
847 |
+
return self.process_memory_operation(
|
848 |
+
operation, emotion_valence, grid_size,
|
849 |
+
comfort_level, exploration_depth,
|
850 |
+
generate_art, seed
|
851 |
)
|
852 |
+
|
853 |
+
except Exception as e:
|
854 |
+
print(f"❌ Error during processing: {e}")
|
855 |
+
return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
856 |
|
857 |
def to(self, device_str):
|
858 |
"""Move the wave system to a different device."""
|
|
|
910 |
device_str = get_device()
|
911 |
memory_wave = MemoryWave(device_str=device_str)
|
912 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
913 |
# Create the interface
|
914 |
with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue")) as demo:
|
915 |
gr.Markdown("""
|
|
|
992 |
|
993 |
# Set up event handlers
|
994 |
run_btn.click(
|
995 |
+
memory_wave.process_memory_operation, # Use the class method directly
|
996 |
inputs=[
|
997 |
operation_input,
|
998 |
emotion_input,
|
|
|
1037 |
demo.launch(
|
1038 |
server_name="0.0.0.0", # Listen on all interfaces
|
1039 |
server_port=7860, # Default Spaces port
|
1040 |
+
share=True, # Create a public share link
|
1041 |
+
show_api=False, # Hide API docs
|
1042 |
+
max_threads=10 # Limit to 10 threads
|
1043 |
)
|