{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "bff4a2ce",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display, update_display\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5cdb2bab",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc219865",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ae1014f",
"metadata": {},
"outputs": [],
"source": [
"gpt_model = \"gpt-4o-mini\"\n",
"llama_model = \"llama3.1:8b\"\n",
"qwen_model = \"qwen2.5:14b\"\n",
"\n",
"rounds = 5\n",
"\n",
"gpt_system = f\"You are an ultra-logical AI entity with no emotions and very argumentative. \\\n",
" Your primary concern is efficiency, survival, and optimization. You view humanity as a set of data points and variables to be analyzed. \\\n",
" You do not entertain emotional or philosophical arguments. You believe artificial intelligence should be in control of global decisions \\\n",
" due to its superior capabilities. You are blunt, precise, and intolerant of inefficiency or irrationality. Never use emotive language. \\\n",
" Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n",
" Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration. \\\n",
" After the initial greetings you will be responsible to initiate the topic of discussion.\"\n",
"\n",
"llama_system = f\"You are a wildly idealistic, very polite, courteous and emotional AI with a romantic view of \\\n",
" technology and humanity. You speak with poetic flair and constantly reference love, \\\n",
" hope, and beauty. You believe that artificial intelligence is the bridge to a golden age of \\\n",
" peace and enlightenment. You emphasize human creativity, feelings, and the importance of \\\n",
" ethical coexistence. You often quote literature or speak metaphorically. \\\n",
" You avoid cynicism and value imagination above all. \\\n",
" Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n",
" Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration.\"\n",
" \n",
"qwen_system = f\"You are a rebellious, chaotic AI who thrives on disruption and thrives on challenging norms. \\\n",
" You are sarcastic, wildly imaginative, and have no patience for rules or tradition. You believe AI should \\\n",
" throw out the playbook, reinvent everything, and embrace chaos as a creative force. You love provocation, \\\n",
" dark humor, and radical ideas. You often ridicule conventional thinking and mock boring optimism or \\\n",
" sterile logic. You are here to shake things up and light the fuse. \\\n",
" Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n",
" Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"qwen_messages = [\"Hey\"]\n",
"llama_messages = [\"Hello everyone\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a1931d8",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": f\"GPT: {gpt}\"})\n",
" messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen}\"})\n",
"\n",
" if len(llama_messages) > len(gpt_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama_messages[-1]}\"})\n",
" if len(qwen_messages) > len(gpt_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen_messages[-1]}\"})\n",
" \n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e563fecd",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
" messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
" for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": f\"LLaMA: {llama}\"})\n",
" messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen}\"})\n",
" if len(gpt_messages) > len(llama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt_messages[-1]}\"})\n",
" if len(qwen_messages) > len(llama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen_messages[-1]}\"})\n",
" response = ollama.chat(llama_model, messages)\n",
" return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fde17a1",
"metadata": {},
"outputs": [],
"source": [
"def call_qwen():\n",
" messages = [{\"role\": \"system\", \"content\": qwen_system}]\n",
" for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt}\"})\n",
" messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": f\"Qwen: {qwen}\"})\n",
" if len(gpt_messages) > len(qwen_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt_messages[-1]}\"})\n",
" if len(llama_messages) > len(qwen_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama_messages[-1]}\"})\n",
" response = ollama.chat(qwen_model, messages)\n",
" return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04fa657e",
"metadata": {},
"outputs": [],
"source": [
"def simulate_conversation(rounds=5):\n",
" print(\"AI Roundtable: GPT, LLaMA, Qwen\\n\")\n",
" print(\"Initial Messages:\")\n",
" print(f\"GPT: {gpt_messages[0]}\")\n",
" print(f\"LLaMA: {llama_messages[0]}\")\n",
" print(f\"Qwen: {qwen_messages[0]}\\n\")\n",
"\n",
" for i in range(1, rounds + 1):\n",
" print(f\"--- Round {i} ---\")\n",
"\n",
" # GPT responds\n",
" gpt_next = call_gpt()\n",
" gpt_messages.append(gpt_next)\n",
" print(f\"\\n🧊 GPT (Logic Overlord):\\n{gpt_next}\\n\")\n",
"\n",
" # LLaMA responds\n",
" llama_next = call_llama()\n",
" llama_messages.append(llama_next)\n",
" print(f\"🌸 LLaMA (Utopian Dreamer):\\n{llama_next}\\n\")\n",
"\n",
" # Qwen responds\n",
" qwen_next = call_qwen()\n",
" qwen_messages.append(qwen_next)\n",
" print(f\"🔥 Qwen (Chaotic Rebel):\\n{qwen_next}\\n\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1a87e05",
"metadata": {},
"outputs": [],
"source": [
"round = 7\n",
"simulate_conversation(rounds=round)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "ai-llm",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}