Update app.py
app.py CHANGED
@@ -1,132 +1,63 @@
  import gradio as gr
  import argparse
- from tabulate import tabulate

  def main():
-     parser = argparse.ArgumentParser(description='
-     parser.add_argument('--
-     parser.add_argument('--
-     parser.add_argument('--
-     parser.add_argument('--
-     parser.add_argument('--

      args = parser.parse_args()

-
-
-
-
-     avg_context_window = args.ctx_window

-
-
-

-
-
-
-         {"name": "A30", "fp16_tflops": 330, "memory_gb": 24, "memory_bandwidth_gbps": 933},
-         {"name": "L40", "fp16_tflops": 181, "memory_gb": 48, "memory_bandwidth_gbps": 864},
-         {"name": "L40s", "fp16_tflops": 362, "memory_gb": 48, "memory_bandwidth_gbps": 864},
-         {"name": "A100 40 GB", "fp16_tflops": 312, "memory_gb": 40, "memory_bandwidth_gbps": 1555},
-         {"name": "A100 40 GB SXM", "fp16_tflops": 312, "memory_gb": 40, "memory_bandwidth_gbps": 1555},
-         {"name": "A100 80 GB PCIe", "fp16_tflops": 312, "memory_gb": 80, "memory_bandwidth_gbps": 1935},
-         {"name": "A100 80 GB SXM", "fp16_tflops": 312, "memory_gb": 80, "memory_bandwidth_gbps": 2039},
-         {"name": "H100 PCIe", "fp16_tflops": 1513, "memory_gb": 80, "memory_bandwidth_gbps": 2000},
-         {"name": "H100 SXM", "fp16_tflops": 1979, "memory_gb": 80, "memory_bandwidth_gbps": 3350},
-         {"name": "H100 NVL", "fp16_tflops": 3958, "memory_gb": 188, "memory_bandwidth_gbps": 7800}
-         # Add or comment out GPU types as needed
-     ]

-
-
-
-
-         {"name": "Llama-3.1-70B", "params_billion": 70, "d_model": 8192, "n_heads": 64, "n_layers": 80, "max_context_window": 131072, "d_head": 128},
-         {"name": "Mistral-7B-v0.3", "params_billion": 7, "d_model": 4096, "n_heads": 32, "n_layers": 32, "max_context_window": 32768, "d_head": 128},
-         {"name": "Falcon-7B", "params_billion": 7, "d_model": 4544, "n_heads": 71, "n_layers": 32, "max_context_window": 2048, "d_head": 64},
-         {"name": "Falcon-40B", "params_billion": 40, "d_model": 8192, "n_heads": 128, "n_layers": 60, "max_context_window": 2048, "d_head": 64},
-         {"name": "Falcon-180B", "params_billion": 180, "d_model": 14848, "n_heads": 232, "n_layers": 80, "max_context_window": 2048, "d_head": 64}
-         # Add or comment out model specifications as needed
-     ]

-
-
-
-
-
-
-
-
-         target_gpu_mem = kv_cache_size_per_token * avg_context_window * n_concurrent_request + model_spec["params_billion"] * 2
-         return target_gpu_mem
-
-     print(f"\n******************** Estimate LLM Memory Footprint ********************")
-     memory_footprint_table = []
-     for model_spec in model_specs:
-         kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
-         memory_footprint = calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window)
-         memory_footprint_table.append([model_spec['name'], f"{kv_cache_size_per_token:.6f} GiB/token", f"{memory_footprint:.2f} GB"])
-     print(tabulate(memory_footprint_table, headers=['Model', 'KV Cache Size per Token', 'Memory Footprint'], tablefmt='orgtbl'))
-
-     def calc_kv_cache_tokens(num_gpu, gpu_memory_gb, model_params_billion, kv_cache_size):
-         result = (num_gpu * gpu_memory_gb - 2 * model_params_billion) / kv_cache_size
-         return result if result >= 0 else "OOM"
-
-     def calc_prefill_time_per_token(num_gpu, model_params_billion, fp16_tflops):
-         result = (2 * model_params_billion / num_gpu) / fp16_tflops
-         return result if result >= 0 else "OOM"
-
-     def calc_generation_time_per_token(num_gpu, model_params_billion, memory_bandwidth_gbps):
-         result = (2 * model_params_billion / num_gpu) / memory_bandwidth_gbps * 1000
-         return result if result >= 0 else "OOM"
-
-     def calc_estimated_response_time(prefill_time, generation_time, prompt_size, response_size):
-         if isinstance(prefill_time, str) or isinstance(generation_time, str): # Check if any are "NA"
-             return "OOM"
-         return (prompt_size * prefill_time + response_size * generation_time) / 1000 # convert ms to seconds
-
-     print(f"\n******************** Estimate LLM Capacity and Latency ******************** ")
-     capacity_latency_table = []
-     for model in model_specs:
-         # print(f"Model: {model['name']} ({model['params_billion']}B parameters)")
-         kv_cache_size = calc_kv_cache_size_per_token(model['n_layers'], model['d_model'])
-         for gpu in gpu_specs:
-             kv_cache_tokens = calc_kv_cache_tokens(num_gpu, gpu['memory_gb'], model['params_billion'], kv_cache_size)
-             prefill_time_per_token = calc_prefill_time_per_token(num_gpu, model['params_billion'], gpu['fp16_tflops'])
-             generation_time_per_token = calc_generation_time_per_token(num_gpu, model['params_billion'], gpu['memory_bandwidth_gbps'])
-             estimated_response_time = calc_estimated_response_time(prefill_time_per_token, generation_time_per_token, prompt_size, response_size)
-             capacity_latency_table.append([model['name'], gpu['name'], f"{kv_cache_tokens}", f"{prefill_time_per_token:.3f} ms", f"{generation_time_per_token:.3f} ms", f"{estimated_response_time:.1f} s"])
-     print(tabulate(capacity_latency_table, headers=['Model', 'GPU', 'KV Cache Tokens', 'Prefill Time', 'Generation Time', 'Estimated Response Time'], tablefmt='orgtbl'))
-
-     def generate_output(model_name, gpu_name, kv_cache_tokens, prefill_time, generation_time, estimated_response_time):
-         return {
-             "Model": model_name,
-             "GPU": gpu_name,
-             "KV Cache Tokens": str(kv_cache_tokens),
-             "Prefill Time (ms)": f"{prefill_time:.3f}",
-             "Generation Time (ms)": f"{generation_time:.3f}",
-             "Estimated Response Time (s)": f"{estimated_response_time:.1f}"
-         }

      with gr.Blocks() as demo:
-         gr.Markdown("#

-
-
-
-
-

-

-

          submit_button.click(
-             fn=lambda
-
-
-
              outputs=[table]
          )

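The removed version calls two helpers, calc_kv_cache_size_per_token and calc_memory_footprint, whose bodies are blank in this rendering of the diff. A minimal sketch of what they likely computed, inferred from the surviving target_gpu_mem expression and the GiB/token formatting; the exact originals may have differed:

def calc_kv_cache_size_per_token(n_layers, d_model):
    # Assumed fp16 cache: 2 bytes per element, one K and one V vector of
    # length d_model per layer, reported in GiB per token.
    return 2 * 2 * n_layers * d_model / 1024**3

def calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window):
    # KV cache for all concurrent requests plus roughly 2 GB of fp16 weights
    # per billion parameters, matching the surviving target_gpu_mem line.
    kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
    target_gpu_mem = kv_cache_size_per_token * avg_context_window * n_concurrent_request + model_spec["params_billion"] * 2
    return target_gpu_mem

Under this reading, Llama-3.1-70B (80 layers, d_model 8192) needs about 0.00244 GiB of cache per token, so ten concurrent requests at a 4,096-token context add roughly 100 GB of KV cache on top of about 140 GB of fp16 weights.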
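The removed latency helpers are simple roofline-style estimates: prefill is treated as compute-bound at about 2 · params FLOPs per token, and decode as bandwidth-bound at about 2 · params bytes read per token. A quick hand-run of those exact formulas for a 70B model on a single H100 SXM, using the numbers from the removed gpu_specs list:

# Removed-code formulas, evaluated for Llama-3.1-70B on one H100 SXM.
params_billion, fp16_tflops, bandwidth_gbps, num_gpu = 70, 1979, 3350, 1

# GFLOP per token divided by TFLOPS comes out directly in milliseconds.
prefill_ms = (2 * params_billion / num_gpu) / fp16_tflops             # ~0.07 ms per prompt token
# GB read per token divided by GB/s, converted to milliseconds.
decode_ms = (2 * params_billion / num_gpu) / bandwidth_gbps * 1000    # ~41.8 ms per generated token

prompt_size, response_size = 4096, 256
response_time_s = (prompt_size * prefill_ms + response_size * decode_ms) / 1000  # ~11 s

The rewritten app.py below drops the tabulate reports and replaces the hardware and model tables with two small dictionaries feeding a single estimator wired to a Gradio form.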
  import gradio as gr
  import argparse

  def main():
+     parser = argparse.ArgumentParser(description='Estimate the capacity and latency of an LLM')
+     parser.add_argument('--gpu', type=str, default='A100 80GB', help='GPU type')
+     parser.add_argument('--model', type=str, default='Llama-3-70B', help='Model name')
+     parser.add_argument('--prompt_size', type=int, default=4096, help='Prompt size in tokens')
+     parser.add_argument('--response_size', type=int, default=256, help='Response size in tokens')
+     parser.add_argument('--concurrent_requests', type=int, default=10, help='Number of concurrent requests')

      args = parser.parse_args()

+     gpu_specs = {
+         'A100 80GB': {'tflops': 312, 'memory_gb': 80, 'bandwidth': 1935},
+         'H100 SXM': {'tflops': 1979, 'memory_gb': 80, 'bandwidth': 3350},
+     }

+     model_specs = {
+         'Llama-3-70B': {'params_billion': 70, 'd_model': 8192, 'n_layers': 80},
+         'Llama-3-8B': {'params_billion': 8, 'd_model': 4096, 'n_layers': 32},
+     }

+     def estimate_llm_capacity(model_name, gpu_name, prompt_size, response_size, concurrent_requests):
+         gpu = gpu_specs[gpu_name]
+         model = model_specs[model_name]

+         kv_cache_tokens = (gpu['tflops'] * concurrent_requests) // (model['params_billion'] * 2)
+         prefill_time_ms = (model['params_billion'] * 2) / (gpu['tflops'] * concurrent_requests) * 1000
+         generation_time_ms = (model['params_billion'] * 2) / (gpu['bandwidth'] * concurrent_requests) * 1000
+         estimated_response_time = (prompt_size * prefill_time_ms + response_size * generation_time_ms) / 1000

+         return f"""
+         Model: {model_name}
+         GPU: {gpu_name}
+         KV Cache Tokens: {kv_cache_tokens:.0f}
+         Prefill Time: {prefill_time_ms:.2f} ms
+         Generation Time: {generation_time_ms:.2f} ms
+         Estimated Response Time: {estimated_response_time:.2f} s
+         """

      with gr.Blocks() as demo:
+         gr.Markdown("# Estimate LLM Capacity and Latency")

+         gpu_dropdown = gr.Dropdown(choices=['A100 80GB', 'H100 SXM'], label="GPU Type", value='A100 80GB')
+         model_dropdown = gr.Dropdown(choices=['Llama-3-70B', 'Llama-3-8B'], label="Model Name", value='Llama-3-70B')
+         prompt_size = gr.Slider(minimum=1, maximum=8192, label="Prompt Size", value=4096)
+         response_size = gr.Slider(minimum=1, maximum=8192, label="Response Size", value=256)
+         concurrent_requests = gr.Slider(minimum=1, maximum=100, label="Concurrent Requests", value=10)

+         table = gr.Textbox()

+         submit_button = gr.Button("Estimate")

          submit_button.click(
+             fn=lambda gpu=gpu_dropdown.value, model=model_dropdown.value,
+                 prompt_size=prompt_size.value, response_size=response_size.value,
+                 concurrent_requests=concurrent_requests.value:
+                 estimate_llm_capacity(model, gpu, prompt_size, response_size, concurrent_requests),
+             inputs=[gpu_dropdown, model_dropdown, prompt_size, response_size, concurrent_requests],
              outputs=[table]
          )
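One change worth flagging: the new kv_cache_tokens is derived from the GPU's TFLOPS, whereas the removed calc_kv_cache_tokens derived it from the memory left over after loading the weights. If the quantity is still meant to be "how many tokens of KV cache fit on the GPU", a sketch closer to the old formula, adapted to the new dictionary-based specs (the helper name and the num_gpu parameter are illustrative, not part of this commit):

def kv_cache_token_capacity(gpu, model, num_gpu=1):
    # fp16 KV cache per token: 2 (K and V) * 2 bytes * n_layers * d_model,
    # treating GB and GiB loosely, as the removed code did.
    kv_gb_per_token = 2 * 2 * model['n_layers'] * model['d_model'] / 1024**3
    # Memory left after roughly 2 GB of fp16 weights per billion parameters.
    free_gb = num_gpu * gpu['memory_gb'] - 2 * model['params_billion']
    return int(free_gb / kv_gb_per_token) if free_gb > 0 else 0  # 0 means the weights alone do not fit

With the specs above, a 70B model in fp16 needs about 140 GB of weights and does not fit on a single 80 GB card at all, which the TFLOPS-based formula does not surface.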
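The lambda passed to submit_button.click freezes the components' initial .value attributes as defaults; Gradio still calls it with the five live values positionally, so it works, but a small named wrapper that only reorders the arguments reads more clearly. A sketch, assuming the same components and estimator; the trailing demo.launch() is an assumption, since no launch call is visible in this rendering of the diff:

# Inside the same `with gr.Blocks() as demo:` block.
def run_estimate(gpu_name, model_name, prompt_tokens, response_tokens, n_requests):
    # Reorder to match estimate_llm_capacity(model_name, gpu_name, ...).
    return estimate_llm_capacity(model_name, gpu_name, prompt_tokens, response_tokens, n_requests)

submit_button.click(
    fn=run_estimate,
    inputs=[gpu_dropdown, model_dropdown, prompt_size, response_size, concurrent_requests],
    outputs=[table],
)

# After the gr.Blocks block, assumed so the Space actually serves the UI.
demo.launch()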