karths committed on
Commit 79ae093 · verified · 1 Parent(s): 703b8d4

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -77,7 +77,7 @@ LLAMA_MAX_MAX_NEW_TOKENS = 512
 LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-llama_model_id = "meta-llama/Llama-3.2-3B-Instruct"
+llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
 llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
 llama_model = AutoModelForCausalLM.from_pretrained(
     llama_model_id,
@@ -135,7 +135,7 @@ Given the following issue description:
 ---
 {issue_text}
 ---
-Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise. Do not repeat the prompt or include any preamble in your response - just provide the explanation directly.
+Provide a short explanation of why this issue might be classified as a **{quality_name}** issue.
 """
 try:
     explanation = llama_generate(prompt)
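
For context, here is a minimal sketch of what the llama_generate helper called in the second hunk might look like; its actual body in app.py is not part of this commit, so everything below is an assumption. It reuses only the names visible in the first hunk (llama_tokenizer, llama_model, llama_device, LLAMA_MAX_INPUT_TOKEN_LENGTH, LLAMA_DEFAULT_MAX_NEW_TOKENS).

import torch

def llama_generate(prompt: str, max_new_tokens: int = LLAMA_DEFAULT_MAX_NEW_TOKENS) -> str:
    # Hypothetical wiring; the real app.py may differ.
    # Tokenize and truncate the prompt to the configured input budget.
    inputs = llama_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=LLAMA_MAX_INPUT_TOKEN_LENGTH,
    ).to(llama_device)
    with torch.no_grad():
        output_ids = llama_model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=False,
        )
    # Decode only the newly generated tokens so the echoed prompt is not
    # repeated in the returned explanation.
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return llama_tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

Note that swapping the 3B checkpoint for the 1B one leaves this call path unchanged; only the weights loaded by from_pretrained differ.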