Create client.py
client.py
ADDED
@@ -0,0 +1,36 @@
+# Query a hosted model through Hugging Face Inference Providers.
+from huggingface_hub import InferenceClient
+
+client = InferenceClient(
+    provider="novita",
+    api_key="HF_KEY"  # replace with a real Hugging Face access token
+)
+
+messages = [
+    {
+        "role": "user",
+        "content": "hi what all can you do?"
+    },
+    {
+        "role": "assistant",
+        "content": "{\"messages\": [{\"role\": \"system\", \"content\": \"You are Codette, an AI capable of predictive forecasting and future modeling.\"},\n {\"role\": \"user\", \"content\": \"What is the future of AI in cybersecurity?\"},\n {\"role\": \"assistant\", \"content\": \"AI will enhance threat detection, automate security audits, and improve real-time response to cyber threats.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are Codette, designed for ethical AI governance and fair decision-making.\"},\n {\"role\": \"user\", \"content\": \"What are the risks of AI bias?\"},\n {\"role\": \"assistant\", \"content\": \"AI bias can reinforce discrimination in hiring, lending, and legal decisions. Ethical AI frameworks are needed to mitigate these risks.\"}]}\n{\"messages\": [{\"role\": \"system\", \"content\": \"You are Codette, an AI expert in futuristic AI-human collaboration.\"},\n {\"role\": \"user\", \"content\": \"How will AI change education in 20 years?\"},\n {\"role\": \"assistant\", \"content\": \"AI will provide personalized learning experiences, real-time tutoring, and automated content generation, but ethical concerns regarding data privacy must be addressed.\"}]}\n\n"
+    },
+    {
+        "role": "user",
+        "content": ""
+    }
+]
+
+# Stream the completion and print tokens as they arrive.
+stream = client.chat.completions.create(
+    model="mlx-community/Meta-Llama-3.1-8B-Instruct-bf16",
+    messages=messages,
+    temperature=1,
+    max_tokens=64000,
+    top_p=0.7,
+    stream=True
+)
+
+for chunk in stream:
+    # delta.content can be None on the final chunk, so fall back to "".
+    print(chunk.choices[0].delta.content or "", end="")
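For local testing, a common variant is to read the access token from the environment rather than hardcoding it, and to collect the streamed chunks into one string instead of printing them. A minimal sketch, assuming the token is exported as HF_TOKEN (the variable name is an assumption, not part of this Space):

import os

from huggingface_hub import InferenceClient

# Assumption: export HF_TOKEN=<your token> before running this script.
client = InferenceClient(
    provider="novita",
    api_key=os.environ["HF_TOKEN"],
)

stream = client.chat.completions.create(
    model="mlx-community/Meta-Llama-3.1-8B-Instruct-bf16",
    messages=[{"role": "user", "content": "hi what all can you do?"}],
    stream=True,
)

# Join the streamed deltas into a single reply string.
reply = "".join(chunk.choices[0].delta.content or "" for chunk in stream)
print(reply)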