farmax committed
Commit fae8f1e · verified · 1 Parent(s): f4405b7

Update app.py

Files changed (1)
app.py  +55 -33
app.py CHANGED
@@ -1,26 +1,14 @@
 import gradio as gr
-import argparse
+import pandas as pd
 from tabulate import tabulate
+from io import StringIO
 
-num_gpu = 1
-prompt_size = 4096
-response_size = 256
-n_concurrent_request = 10
-avg_context_window = 1024
-
-def main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_window):
-    #parser = argparse.ArgumentParser(description='Your script description')
-    #parser.add_argument('-g', '--num_gpu', type=int, default=1, help='Number of GPUs')
-    #parser.add_argument('-p', '--prompt_sz', type=int, default=4096, help='Prompt size in tokens')
-    #parser.add_argument('-r', '--response_sz', type=int, default=256, help='Response size in tokens')
-    #parser.add_argument('-c', '--n_concurrent_req', type=int, default=10, help='Number of concurrent requests')
-    #parser.add_argument('-w', '-cw', '--ctx_window', type=int, default=1024, help='Average context window')
-
-    # args = parser.parse_args()
-
-    # Print input
-    print(f" num_gpu = {num_gpu}, prompt_size = {prompt_size} tokens, response_size = {response_size} tokens")
-    print(f" n_concurrent_request = {n_concurrent_request}, avg_context_window = {avg_context_window} tokens")
+def calculate_llm_metrics(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_window):
+    output = StringIO()
+
+    # Print input to output buffer
+    print(f" num_gpu = {num_gpu}, prompt_size = {prompt_size} tokens, response_size = {response_size} tokens", file=output)
+    print(f" n_concurrent_request = {n_concurrent_request}, avg_context_window = {avg_context_window} tokens", file=output)
 
     # Define variables
     gpu_specs = [
@@ -35,7 +23,6 @@ def main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_
         {"name": "H100 PCIe", "fp16_tflops": 1513, "memory_gb": 80, "memory_bandwidth_gbps": 2000},
         {"name": "H100 SXM", "fp16_tflops": 1979, "memory_gb": 80, "memory_bandwidth_gbps": 3350},
         {"name": "H100 NVL", "fp16_tflops": 3958, "memory_gb": 188, "memory_bandwidth_gbps": 7800}
-        # Add or comment out GPU types as needed
     ]
 
     model_specs = [
@@ -47,26 +34,27 @@ def main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_
         {"name": "Falcon-7B", "params_billion": 7, "d_model": 4544, "n_heads": 71, "n_layers": 32, "max_context_window": 2048, "d_head": 64},
         {"name": "Falcon-40B", "params_billion": 40, "d_model": 8192, "n_heads": 128, "n_layers": 60, "max_context_window": 2048, "d_head": 64},
         {"name": "Falcon-180B", "params_billion": 180, "d_model": 14848, "n_heads": 232, "n_layers": 80, "max_context_window": 2048, "d_head": 64}
-        # Add or comment out model specifications as needed
     ]
 
-    BYTES_IN_GB = 1_073_741_824   # 1 GB = 1,073,741,824 bytes
+    BYTES_IN_GB = 1_073_741_824
 
     def calc_kv_cache_size_per_token(n_layers, d_model):
-        return 2 * 2 * n_layers * d_model / BYTES_IN_GB # GB/token
+        return 2 * 2 * n_layers * d_model / BYTES_IN_GB
 
     def calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window):
         kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
         target_gpu_mem = kv_cache_size_per_token * avg_context_window * n_concurrent_request + model_spec["params_billion"] * 2
         return target_gpu_mem
 
-    print(f"\n******************** Estimate LLM Memory Footprint ********************")
+    print(f"\n******************** Estimate LLM Memory Footprint ********************", file=output)
     memory_footprint_table = []
     for model_spec in model_specs:
        kv_cache_size_per_token = calc_kv_cache_size_per_token(model_spec["n_layers"], model_spec["d_model"])
        memory_footprint = calc_memory_footprint(model_spec, n_concurrent_request, avg_context_window)
        memory_footprint_table.append([model_spec['name'], f"{kv_cache_size_per_token:.6f} GiB/token", f"{memory_footprint:.2f} GB"])
-    print(tabulate(memory_footprint_table, headers=['Model', 'KV Cache Size per Token', 'Memory Footprint'], tablefmt='orgtbl'))
+
+    memory_df = pd.DataFrame(memory_footprint_table, columns=['Model', 'KV Cache Size per Token', 'Memory Footprint'])
+    print(tabulate(memory_footprint_table, headers=['Model', 'KV Cache Size per Token', 'Memory Footprint'], tablefmt='orgtbl'), file=output)
 
     def calc_kv_cache_tokens(num_gpu, gpu_memory_gb, model_params_billion, kv_cache_size):
         result = (num_gpu * gpu_memory_gb - 2 * model_params_billion) / kv_cache_size
@@ -81,14 +69,13 @@ def main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_
         return result if result >= 0 else "OOM"
 
     def calc_estimated_response_time(prefill_time, generation_time, prompt_size, response_size):
-        if isinstance(prefill_time, str) or isinstance(generation_time, str): # Check if any are "NA"
+        if isinstance(prefill_time, str) or isinstance(generation_time, str):
             return "OOM"
-        return (prompt_size * prefill_time + response_size * generation_time) / 1000 # convert ms to seconds
+        return (prompt_size * prefill_time + response_size * generation_time) / 1000
 
-    print(f"\n******************** Estimate LLM Capacity and Latency ******************** ")
+    print(f"\n******************** Estimate LLM Capacity and Latency ******************** ", file=output)
     capacity_latency_table = []
     for model in model_specs:
-        # print(f"Model: {model['name']} ({model['params_billion']}B parameters)")
         kv_cache_size = calc_kv_cache_size_per_token(model['n_layers'], model['d_model'])
         for gpu in gpu_specs:
             kv_cache_tokens = calc_kv_cache_tokens(num_gpu, gpu['memory_gb'], model['params_billion'], kv_cache_size)
@@ -96,8 +83,43 @@ def main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_
             generation_time_per_token = calc_generation_time_per_token(num_gpu, model['params_billion'], gpu['memory_bandwidth_gbps'])
             estimated_response_time = calc_estimated_response_time(prefill_time_per_token, generation_time_per_token, prompt_size, response_size)
             capacity_latency_table.append([model['name'], gpu['name'], f"{kv_cache_tokens}", f"{prefill_time_per_token:.3f} ms", f"{generation_time_per_token:.3f} ms", f"{estimated_response_time:.1f} s"])
-    print(tabulate(capacity_latency_table, headers=['Model', 'GPU', 'KV Cache Tokens', 'Prefill Time', 'Generation Time', 'Estimated Response Time'], tablefmt='orgtbl'))
-
+
+    capacity_df = pd.DataFrame(capacity_latency_table, columns=['Model', 'GPU', 'KV Cache Tokens', 'Prefill Time', 'Generation Time', 'Estimated Response Time'])
+    print(tabulate(capacity_latency_table, headers=['Model', 'GPU', 'KV Cache Tokens', 'Prefill Time', 'Generation Time', 'Estimated Response Time'], tablefmt='orgtbl'), file=output)
+
+    return output.getvalue(), memory_df, capacity_df
+
+# Create Gradio interface
+with gr.Blocks(title="LLM Calculator") as demo:
+    gr.Markdown("# LLM Memory and Performance Calculator")
+
+    with gr.Row():
+        with gr.Column():
+            num_gpu = gr.Slider(minimum=1, maximum=8, value=1, step=1, label="Number of GPUs")
+            prompt_size = gr.Slider(minimum=1, maximum=8192, value=4096, step=1, label="Prompt Size (tokens)")
+            response_size = gr.Slider(minimum=1, maximum=2048, value=256, step=1, label="Response Size (tokens)")
+            n_concurrent_request = gr.Slider(minimum=1, maximum=50, value=10, step=1, label="Number of Concurrent Requests")
+            avg_context_window = gr.Slider(minimum=1, maximum=8192, value=1024, step=1, label="Average Context Window (tokens)")
+
+    calculate_button = gr.Button("Calculate")
+
+    with gr.Row():
+        with gr.Column():
+            text_output = gr.Textbox(label="Detailed Output", lines=10)
+
+    with gr.Row():
+        with gr.Column():
+            memory_table = gr.Dataframe(label="Memory Footprint Results")
+
+    with gr.Row():
+        with gr.Column():
+            capacity_table = gr.Dataframe(label="Capacity and Latency Results")
+
+    calculate_button.click(
+        calculate_llm_metrics,
+        inputs=[num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_window],
+        outputs=[text_output, memory_table, capacity_table]
+    )
 
 if __name__ == "__main__":
-    main(num_gpu, prompt_size, response_size, n_concurrent_request, avg_context_window)
+    demo.launch()
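
As a quick sanity check of the sizing math this commit moves into calculate_llm_metrics, here is a standalone sketch (not part of app.py) that re-derives the Falcon-7B numbers from the specs in the diff; the expected values are worked out by hand under the UI's default slider settings:

    # Sketch: re-run the commit's KV-cache formula for Falcon-7B
    # (n_layers=32, d_model=4544, params_billion=7, taken from model_specs).
    BYTES_IN_GB = 1_073_741_824  # bytes per GiB

    def calc_kv_cache_size_per_token(n_layers, d_model):
        # 2 (key + value) * 2 (bytes per FP16 element) * n_layers * d_model
        return 2 * 2 * n_layers * d_model / BYTES_IN_GB

    kv = calc_kv_cache_size_per_token(32, 4544)
    print(f"{kv:.6f} GiB/token")   # -> 0.000542 GiB/token (~0.55 MiB per cached token)

    # Footprint with the defaults (avg_context_window=1024, n_concurrent_request=10):
    # KV cache for all concurrent requests plus FP16 weights (params_billion * 2 ~ GB).
    footprint = kv * 1024 * 10 + 7 * 2
    print(f"{footprint:.2f} GB")   # -> 19.55 GB, fits a single 80 GB GPU

    # The refactored entry point can also be exercised without the UI:
    # text, memory_df, capacity_df = calculate_llm_metrics(1, 4096, 256, 10, 1024)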