IlyasMoutawwakil (HF Staff) committed (verified)
Commit: 94aedaa · Parent(s): 35dcbe0

Upload cuda_inference_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark_config.json with huggingface_hub
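For context, an upload like this is typically performed with the huggingface_hub library's upload_file API. The sketch below is an assumption about how this file could have been pushed; the target repo_id is a placeholder, since the commit does not show the repository name, and repo_type="dataset" is likewise an assumption (benchmark result files are commonly stored in dataset repos).

# Hypothetical sketch: pushing benchmark_config.json to the Hub with huggingface_hub.
# The repo_id below is a placeholder -- this commit does not identify the target repository.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark_config.json",  # local file produced by the benchmark run
    path_in_repo=(
        "cuda_inference_transformers_text-generation_hf-internal-testing/"
        "tiny-random-LlamaForCausalLM/benchmark_config.json"
    ),
    repo_id="<namespace>/<benchmark-results-repo>",  # placeholder, not from the source
    repo_type="dataset",  # assumption about where these results live
    commit_message="Upload benchmark_config.json with huggingface_hub",
)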

cuda_inference_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark_config.json CHANGED
@@ -24,6 +24,7 @@
     "low_cpu_mem_usage": null,
     "attn_implementation": null,
     "cache_implementation": null,
+    "allow_tf32": false,
     "autocast_enabled": false,
     "autocast_dtype": null,
     "torch_compile": false,
@@ -75,7 +76,7 @@
     "cpu_ram_mb": 66697.248768,
     "system": "Linux",
     "machine": "x86_64",
-    "platform": "Linux-5.10.230-223.885.amzn2.x86_64-x86_64-with-glibc2.35",
+    "platform": "Linux-5.10.234-225.921.amzn2.x86_64-x86_64-with-glibc2.35",
     "processor": "x86_64",
     "python_version": "3.10.12",
     "gpu": [
@@ -89,11 +90,11 @@
     "transformers_commit": null,
     "accelerate_version": "1.3.0",
     "accelerate_commit": null,
-    "diffusers_version": "0.32.2",
+    "diffusers_version": "0.33.1",
     "diffusers_commit": null,
     "optimum_version": null,
     "optimum_commit": null,
-    "timm_version": "1.0.14",
+    "timm_version": "1.0.15",
     "timm_commit": null,
     "peft_version": "0.14.0",
     "peft_commit": null