import os
import json
from datetime import datetime

import gradio as gr
from openai import OpenAI


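# Helper: print a message with a microsecond-precision timestamp and return it.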
def print_now(msg):
    now = datetime.now()
    formatted_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
    print(f"{msg}:{formatted_time}")
    return formatted_time

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
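    """Stream a reply from the Hunyuan T1 model for the Gradio ChatInterface.

    Yields the accumulated Markdown response, wrapping the model's reasoning
    trace in blockquote markers before the final answer. The extra
    ChatInterface parameters (system_message, max_tokens, temperature, top_p)
    are accepted but not forwarded to the API in this demo.
    """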
    try:
        default_system = "You are Tencent's helpful AI assistant Hunyuan."

        messages = [{"Role": "system", "Content": default_system}]
        client = OpenAI(
            api_key=os.getenv('HUNYUAN_API_KEY'),
            base_url="https://api.hunyuan.cloud.tencent.com/v1",
        )
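        # Replay prior turns so the model sees the full conversation history.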
        for val in history:
            if val[0] and val[1]:
                messages.append({"Role": "user", "Content": val[0]})
                messages.append({"Role": "assistant", "Content": val[1]})
        
        messages.append({"Role": "user", "Content": message})
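        # Request a streamed completion; Hunyuan-specific flags are passed via `extra_body`.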
        completion = client.chat.completions.create(
            model="hunyuan-t1-latest",
            messages=messages,
            stream=True,
            extra_body={
                "stream_moderation": True,
                "enable_enhancement": False,
            }
        )
        response = ""
        is_reasoning_start = True
        is_reasoning_end = True

        for event in completion:
            delta = event.choices[0].delta
            # Reasoning tokens arrive in `reasoning_content`; the answer arrives in `content`.
            if getattr(delta, 'reasoning_content', None):
                if is_reasoning_start:
                    response += '> **Thinking started**\n\n'
                    is_reasoning_start = False
                token = delta.reasoning_content
            else:
                if is_reasoning_end and not is_reasoning_start:
                    response += '> **Thinking finished**\n\n'
                    is_reasoning_end = False
                token = delta.content
            # Append the streamed token (it can be None on some chunks) and yield the partial reply.
            if token:
                response += token
            yield response
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")

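# Example prompts (English and Chinese) offered as clickable suggestions in the UI.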
example_prompts = [
    ["How to cook Kung Pao chicken the tastiest?"],
    ["Help me create an email expressing my greetings to an old friend."],
    ["写一篇关于青春的五言绝句"],
    ["一枚反面朝上的硬币,被翻转了15下后,它的上面是正面,这个说法正确吗?"]
]
latex_delimiters = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "\\[", "right": "\\]", "display": True},{"left": "$", "right": "$", "display": False},
    {"left": "\\(", "right": "\\)", "display": False}
]


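# Render LaTeX in responses using the delimiters defined above.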
chatbot = gr.Chatbot(latex_delimiters=latex_delimiters, scale=9)

demo = gr.ChatInterface(
    respond,
    title="Hunyuan T1",
    examples=example_prompts,
    chatbot=chatbot,
)

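# Queue incoming requests, allowing up to 40 concurrent generations.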
if __name__ == "__main__":
    demo.queue(default_concurrency_limit=40)
    demo.launch(max_threads=40)