NeoPy committed on
Commit d6a8411 · verified · 1 Parent(s): 0fb64a2

Update app.py

Files changed (1)
  1. app.py +88 -147
app.py CHANGED
@@ -1,154 +1,95 @@
  import gradio as gr
  import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
  }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
-                 )

-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
      )

  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
  import numpy as np
+ import sounddevice as sd
+ import scipy.io.wavfile as wavfile
+
+ # Morse code dictionary
+ MORSE_CODE_DICT = {
+     'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....',
+     'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.',
+     'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
+     'Y': '-.--', 'Z': '--..', '0': '-----', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
+     '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', ' ': ' '
  }

+ def text_to_morse(text):
+     """Convert text to Morse code."""
+     text = text.upper()
+     morse = []
+     for char in text:
+         if char in MORSE_CODE_DICT:
+             morse.append(MORSE_CODE_DICT[char])
+         else:
+             morse.append('?')
+     return ' '.join(morse)
+
+ def generate_morse_audio(morse_text, frequency=800, wpm=20, sample_rate=44100):
+     """Generate audio for Morse code."""
+     # Timing calculations based on WPM (words per minute)
+     dit_duration = 1.2 / wpm  # Duration of a dit in seconds
+     dah_duration = 3 * dit_duration  # Duration of a dah
+     space_duration = dit_duration  # Space between symbols
+     word_space_duration = 7 * dit_duration  # Space between words
+
+     # Generate audio samples
+     t = np.linspace(0, 1, int(sample_rate * 1), endpoint=False)
+     tone = np.sin(2 * np.pi * frequency * t)
+
+     audio = []
+     for symbol in morse_text:
+         if symbol == '.':
+             # Dit: short beep
+             audio.extend(tone[:int(sample_rate * dit_duration)])
+         elif symbol == '-':
+             # Dah: long beep
+             audio.extend(tone[:int(sample_rate * dah_duration)])
+         elif symbol == ' ':
+             # Space between words
+             audio.extend(np.zeros(int(sample_rate * word_space_duration)))
+         # Space between symbols
+         if symbol != ' ':
+             audio.extend(np.zeros(int(sample_rate * space_duration)))
+
+     audio = np.array(audio)
+     # Normalize audio to prevent clipping
+     audio = audio / np.max(np.abs(audio)) * 0.8
+     # Save to WAV file
+     output_file = "morse_audio.wav"
+     wavfile.write(output_file, sample_rate, audio.astype(np.float32))
+     return output_file
+
+ def convert_to_morse_and_audio(input_text, frequency, wpm):
+     """Convert text to Morse code and generate audio."""
+     if not input_text:
+         return "Please enter some text.", None
+
+     # Convert text to Morse code
+     morse_text = text_to_morse(input_text)
+     # Generate audio
+     audio_file = generate_morse_audio(morse_text, frequency, wpm)
+     return morse_text, audio_file
+
+ # Gradio Blocks UI
+ with gr.Blocks(title="Text to Morse Code Audio Converter") as demo:
+     gr.Markdown("# Text to Morse Code Audio Converter")
+     gr.Markdown("Enter text to convert it to Morse code and generate an audio file.")
+
+     with gr.Row():
+         with gr.Column():
+             text_input = gr.Textbox(label="Input Text", placeholder="Enter text (e.g., SOS)")
+             frequency = gr.Slider(400, 1200, value=800, step=10, label="Audio Frequency (Hz)")
+             wpm = gr.Slider(5, 40, value=20, step=1, label="Speed (Words Per Minute)")
+             convert_button = gr.Button("Convert")
+
+         with gr.Column():
+             morse_output = gr.Textbox(label="Morse Code", interactive=False)
+             audio_output = gr.Audio(label="Morse Code Audio", interactive=False)
+
+     convert_button.click(
+         fn=convert_to_morse_and_audio,
+         inputs=[text_input, frequency, wpm],
+         outputs=[morse_output, audio_output]
      )

  if __name__ == "__main__":
+     demo.launch()
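
For reference, a minimal sketch of what the new conversion path produces, kept separate from the committed file. The reduced dictionary MORSE_SUBSET and the helper name to_morse below are illustrative only; the commit's own functions are text_to_morse and generate_morse_audio.

# Illustrative sketch only (not part of app.py): restates the commit's Morse
# mapping rule and its dit/dah timing to show the expected values.
MORSE_SUBSET = {'S': '...', 'O': '---'}  # small slice of MORSE_CODE_DICT above

def to_morse(text):
    # Same rule as text_to_morse: characters without a mapping become '?'
    return ' '.join(MORSE_SUBSET.get(c, '?') for c in text.upper())

print(to_morse("sos"))   # "... --- ..."

# Timing used by generate_morse_audio: one dit lasts 1.2 / wpm seconds
wpm = 20
dit = 1.2 / wpm          # 0.06 s per dit at 20 WPM
dah = 3 * dit            # 0.18 s per dah
word_gap = 7 * dit       # 0.42 s between words
print(dit, dah, word_gap)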