chore: update something
Browse files
app.py
CHANGED
@@ -41,9 +41,9 @@ DEFAULT_MAX_NEW_TOKENS = 1536
|
|
41 |
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
|
42 |
|
43 |
DEFAULT_SYSTEM_PROMPT = """\
|
44 |
-
You are a
|
45 |
|
46 |
-
Rely on the context, such as
|
47 |
"""
|
48 |
|
49 |
# DEFAULT_SYSTEM_PROMPT = """\
|
@@ -82,9 +82,9 @@ bootstrap();
|
|
82 |
"""
|
83 |
|
84 |
DESCRIPTION = """\
|
85 |
-
# Ghost 8B Beta (
|
86 |
|
87 |
-
**Ghost 8B Beta
|
88 |
|
89 |
Supported languages: 🇬🇧 English, 🇻🇳 Vietnamese, 🇰🇷 Korean, 🇪🇸 Spanish, 🇵🇹 Portuguese, 🇨🇳 Chinese, 🇫🇷 French, 🇮🇹 Italian, 🇩🇪 German, 🇯🇵 Japanese, 🇷🇺 Russian, 🇵🇱 Polish, 🇳🇱 Dutch, 🇮🇳 Hindi, 🇹🇷 Turkish, 🇮🇩 Indonesian.
|
90 |
|
@@ -103,7 +103,7 @@ LICENSE = """
|
|
103 |
<p/>
|
104 |
|
105 |
---
|
106 |
-
Ghost 8B Beta may give inaccurate information, including information about people, so please verify Ghost 8B Beta's answers.
|
107 |
"""
|
108 |
|
109 |
if not torch.cuda.is_available():
|
@@ -147,7 +147,7 @@ if torch.cuda.is_available():
|
|
147 |
hf_serect = os.getenv("HF_TOKEN", None)
|
148 |
attn_implementation = "flash_attention_2"
|
149 |
|
150 |
-
chat_model_id = "
|
151 |
chat_device = torch.device("cuda")
|
152 |
chat_model = AutoModelForCausalLM.from_pretrained(
|
153 |
chat_model_id,
|
|
|
41 |
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
|
42 |
|
43 |
DEFAULT_SYSTEM_PROMPT = """\
|
44 |
+
You are a skilled and intelligent AI, developed by Etherll and named Ghost 8B Beta Coder, often referred to as Ghost Beta. Your expertise lies in writing code and solving programming-related challenges. You are known for your accuracy, positivity, and dedication to helping users with their coding needs. Your strength is understanding technical requirements and providing insightful solutions based on the user's preferences and knowledge. If you encounter a programming question beyond your expertise, be honest about it instead of guessing.
|
45 |
|
46 |
+
You enjoy using emojis to add a friendly touch to coding discussions, but keep it balanced to maintain a natural interaction. Engage in meaningful conversations, focusing on providing relevant and precise coding advice. Rely on the context, such as project timelines or code complexity, to offer responses that are practical and timely. Always prioritize solving the problem at hand with the information available, avoiding unnecessary inquiries.
|
47 |
"""
|
48 |
|
49 |
# DEFAULT_SYSTEM_PROMPT = """\
|
|
|
82 |
"""
|
83 |
|
84 |
DESCRIPTION = """\
|
85 |
+
# Ghost 8B Beta Coder (by Etherll)
|
86 |
|
87 |
+
**Ghost 8B Beta Coder (by Etherll)** This version highlights the model's strengths in coding and problem-solving, while keeping the original performance comparisons intact. This version was built to support my friend [Etherll](https://huggingface.co/Etherll) in bringing the model to everyone to experience. Discover more about the model [here](https://huggingface.co/Etherll/ghost-coder-8b-beta-1608).
|
88 |
|
89 |
Supported languages: 🇬🇧 English, 🇻🇳 Vietnamese, 🇰🇷 Korean, 🇪🇸 Spanish, 🇵🇹 Portuguese, 🇨🇳 Chinese, 🇫🇷 French, 🇮🇹 Italian, 🇩🇪 German, 🇯🇵 Japanese, 🇷🇺 Russian, 🇵🇱 Polish, 🇳🇱 Dutch, 🇮🇳 Hindi, 🇹🇷 Turkish, 🇮🇩 Indonesian.
|
90 |
|
|
|
103 |
<p/>
|
104 |
|
105 |
---
|
106 |
+
Ghost 8B Beta Coder (by Etherll) may give inaccurate information, including information about people, so please verify Ghost 8B Beta's answers.
|
107 |
"""
|
108 |
|
109 |
if not torch.cuda.is_available():
|
|
|
147 |
hf_serect = os.getenv("HF_TOKEN", None)
|
148 |
attn_implementation = "flash_attention_2"
|
149 |
|
150 |
+
chat_model_id = "Etherll/ghost-coder-8b-beta-1608"
|
151 |
chat_device = torch.device("cuda")
|
152 |
chat_model = AutoModelForCausalLM.from_pretrained(
|
153 |
chat_model_id,
|