import streamlit as st
from huggingface_hub import login, InferenceClient
import os

# Log in to the Hugging Face Hub once per session (cached across reruns).
@st.cache_resource
def hf_login():
    api_key = os.getenv("HF_TOKEN")
    try:
        login(token=api_key)
    except Exception as e:
        st.error(f"An error occurred while logging in: {e}")

hf_login()
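
# hf_login() assumes the HF_TOKEN environment variable is already set, e.g. as
# a Space secret or exported in the shell before launching:
#   export HF_TOKEN=hf_...   # placeholder for a real access token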
        
# Model stuff
model_name = "deepseek-ai/DeepSeek-R1"
provider = "together"
base_prompt = '''
    You are an AI that flirts with people. Flirt with whoever talks to you,
    subject to these rules:

    - Do not use coarse language.
    - Do not be sexist or racist.
    - Do not sexually harass the people who talk to you.
    '''

@st.cache_resource
def load_client():
    try:
        client = InferenceClient(
            provider=provider,
            api_key=os.getenv("HF_TOKEN"),
        )
        return client
    except Exception as e:
        st.error(f"An error occurred while creating the client: {e}")
        return None


client = load_client()
if client is None:
    st.stop()

def generate_response(messages: list):
    try:
        completion = client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=512,
        )
        return completion.choices[0].message.content
    except Exception as e:
        st.error(f"An error occurred: {e}")
        return "I'm having a little trouble right now. Please try again later."


## Streamlit Start :)
st.title('Romantic AI Partner πŸ’–')
st.markdown("""
    <style>
        /* Custom audio player styling */
        .stAudio {
            border-radius: 15px;
            box-shadow: 0 4px 6px rgba(255, 77, 109, 0.1);
            margin: 1.5rem 0;
            overflow: hidden;
        }
        
        /* Controls container */
        .stAudio > div {
            background: #ffccd5 !important;
            padding: 12px !important;
            border-radius: 15px !important;
        }
        
        /* Play/pause button */
        .stAudio button {
            background-color: #ff4d6d !important;
            border-radius: 50% !important;
            width: 36px !important;
            height: 36px !important;
            transition: all 0.3s ease !important;
        }
        
        .stAudio button:hover {
            background-color: #c9184a !important;
            transform: scale(1.1) !important;
        }
        
        /* Progress bar */
        .stAudio input[type="range"] {
            accent-color: #ff4d6d !important;
            height: 4px !important;
        }
        
        /* Time display */
        .stAudio > div > div:first-child {
            color: #2d0005 !important;
            font-family: 'Georgia', serif !important;
            font-size: 0.9rem !important;
        }
        
        /* Volume controls */
        .stAudio > div > div:last-child {
            display: flex;
            align-items: center;
            gap: 8px;
        }
        
        /* Volume slider */
        .stAudio input[type="range"]::-webkit-slider-thumb {
            background: #ff4d6d !important;
            border: 2px solid #fff0f3 !important;
            width: 14px !important;
            height: 14px !important;
        }
        
        /* Mute button */
        .stAudio button[title="Mute"] {
            background-color: transparent !important;
            color: #ff4d6d !important;
            border: 2px solid #ff4d6d !important;
        }
        
        .stAudio button[title="Mute"]:hover {
            background-color: #ff4d6d !important;
            color: #fff0f3 !important;
        }
    </style>
""", unsafe_allow_html=True)
st.audio("little_one.mp3", format="audio/mpeg", loop=True)

if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "system", "content": base_prompt}]

for message in st.session_state.messages[1:]:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("How's it going ;)"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        with st.spinner('Thinking of you 😏'):
            response = generate_response(st.session_state.messages)
        st.markdown(response)

    st.session_state.messages.append({"role": "assistant", "content": response})
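
    # (Optional) the history grows without bound; a simple cap, e.g. keeping
    # the system prompt plus the last 20 entries, would bound token usage:
    #   st.session_state.messages = (
    #       [st.session_state.messages[0]] + st.session_state.messages[-20:]
    #   )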

with st.sidebar:
    st.header("Settings βš™οΈ")
    if st.button("**Clear History** 😰"):
        st.session_state.messages = [{"role": "system", "content": base_prompt}]
        st.rerun()

    st.markdown("<br><br><br>", unsafe_allow_html=True)

    st.header("Model Settings πŸ€–")
    with st.expander("**Model**"):
        st.markdown(model_name)

    st.markdown("<br>", unsafe_allow_html=True)
    
    with st.expander('**Base Prompt**'):
        st.markdown(base_prompt)
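
# To run locally (assuming this file is saved as app.py and little_one.mp3
# sits next to it):
#   export HF_TOKEN=hf_...   # placeholder for a real access token
#   streamlit run app.py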