rishikumar20202023 committed
Commit e9df1da · verified · 1 Parent(s): fc2ec29

Update app.py

Files changed (1):
  app.py (+18, -14)
app.py CHANGED
@@ -48,6 +48,18 @@ h1 {
 }
 """
 
+
+import json
+
+
+import json
+
+def str_to_json(str_obj):
+    json_obj = json.loads(str_obj)  # parse a JSON-formatted string into a Python object
+    return json_obj
+
+
+
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("HumanLLMs/Human-Like-LLama3-8B-Instruct")
 model = AutoModelForCausalLM.from_pretrained("HumanLLMs/Human-Like-LLama3-8B-Instruct", device_map="auto") # to("cuda:0")
@@ -62,20 +74,12 @@ def chat_llama3_8b(message: str,
                    temperature: float,
                    max_new_tokens: int
                    ) -> str:
-    """
-    Generate a streaming response using the llama3-8b model.
-    Args:
-        message (str): The input message.
-        history (list): The conversation history used by ChatInterface.
-        temperature (float): The temperature for generating the response.
-        max_new_tokens (int): The maximum number of new tokens to generate.
-    Returns:
-        str: The generated response.
-    """
-    conversation = []
-    for user, assistant in history:
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
+
+    x = str_to_json(str(message))  # the incoming message is now a JSON string
+    conversation = x['messages']   # the conversation list lives under its "messages" key
+    # for user, assistant in history:
+    #     conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+    # conversation.append({"role": "user", "content": message})
 
     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
 
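For context, this change makes the handler expect the client to send the whole conversation as a single JSON string under a "messages" key, instead of relying on the history list that Gradio's ChatInterface passes in. A minimal sketch of a payload in that assumed format (the role/content schema is what tokenizer.apply_chat_template expects; the payload contents below are made up for illustration):

import json

# Hypothetical payload for the updated chat_llama3_8b handler: a JSON string
# whose "messages" list carries the full conversation in role/content form.
payload = json.dumps({
    "messages": [
        {"role": "user", "content": "Hi there!"},
        {"role": "assistant", "content": "Hello! How can I help?"},
        {"role": "user", "content": "Tell me a joke."}
    ]
})

# Inside the handler, str_to_json(payload)["messages"] is passed straight to
# tokenizer.apply_chat_template(...).

With this change the history argument is effectively unused; the client is responsible for carrying the conversation state in each request.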