|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests |
|
import time |
|
import json |
|
import asyncio |
|
import io |
|
import os |
|
import re |
|
import base64 |
|
from PIL import Image |
|
from pyrogram import * |
|
from pyrogram import enums |
|
from pyrogram import Client, filters |
|
from pyrogram.types import * |
|
from pyrogram.errors import * |
|
from RyuzakiLib import FaceAI, FullStackDev, GeminiLatest, RendyDevChat |
|
from config import * |
|
|
|
from akn import send_log |
|
from akn.utils.database import db |
|
from akn.utils.logger import LOGS |
|
|
|
import google.generativeai as genai |
|
from google.api_core.exceptions import InvalidArgument |
|
|
|
from akn.utils.scripts import progress |
|
from akenoai import * |
|
from akenoai.types import DifferentAPIDefault |
|
|
|
# Module-level AkenoX API client, connected once at import time and shared by
# every handler below (used for flux image generation via js.image.create).
js = AkenoXJs(DifferentAPIDefault()).connect()
|
|
|
async def remove_bg_myapi(return_image):
    """Remove the background of an image via the remote remove-bg API.

    Reads the file at *return_image*, sends it base64-encoded to the
    randydev-meta-ai endpoint, and writes the decoded result to
    ``output.png`` in the working directory.

    Returns:
        The output path (``"output.png"``) on success, or ``None`` when the
        API call fails or returns no image.
    """
    with open(return_image, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
    payload = {"image_base64": encoded_string}
    output_bg = "output.png"
    # requests is blocking; run it in a worker thread so the event loop is
    # not stalled, and bound the call with a timeout so it cannot hang forever.
    response = await asyncio.to_thread(
        requests.post,
        "https://randydev-meta-ai.hf.space/remove-bg-base64",
        json=payload,
        timeout=60,
    )
    if response.status_code != 200:
        return None
    result_base64 = response.json().get("output_image_base64")
    if not result_base64:
        # API answered 200 but without an image payload; treat as failure
        # instead of crashing in b64decode(None).
        return None
    with open(output_bg, "wb") as out_file:
        out_file.write(base64.b64decode(result_base64))
    return output_bg
|
|
|
async def geni_files_delete(name: str):
    """Delete an uploaded file from the Gemini Files API.

    Args:
        name: The file resource name (e.g. ``"files/abc123"``).

    Returns:
        The raw response body on success, or ``None`` on a non-200 status.
    """
    url = f"https://generativelanguage.googleapis.com/v1beta/{name}"
    # GOOGLE_API_KEY comes from `from config import *` at the top of the file.
    params = {"key": GOOGLE_API_KEY}
    # requests is blocking; run it off the event loop and bound it with a timeout.
    response = await asyncio.to_thread(requests.delete, url, params=params, timeout=30)
    if response.status_code != 200:
        return None
    return response.text
|
|
|
# Commands the generic chatbot handler must skip — each of these has its own
# dedicated handler below.
# NOTE(review): the name keeps the historical "COMMNAD" typo; it is referenced
# elsewhere in this file, so renaming it would break those call sites.
DISABLED_COMMNAD = [

    "onchat",

    "offchat",

    "start"

]
|
|
|
# Greeting template for /start; {name} is filled with the user's mention.
# The string body (including blank lines) is user-facing text — do not reflow.
GEMINI_START_TEXT = """

Hey! {name}



I am ready to be a gemini bot developer



- Command: /onchat (pm or group)

- Command: /offchat (pm or group)

"""
|
|
|
@Client.on_message(
    ~filters.scheduled
    & filters.command(["start"])
    & ~filters.forwarded
)
async def startbot(client: Client, message: Message):
    """Answer /start with the greeting text and an inline keyboard
    linking the developer, the channel, and the donation web app.
    """
    links_row = [
        InlineKeyboardButton(text="Developer", url="https://t.me/xtdevs"),
        InlineKeyboardButton(text="Channel", url="https://t.me/RendyProjects"),
    ]
    donate_row = [
        InlineKeyboardButton(
            text="Donate Via Web",
            web_app=WebAppInfo(url="https://sociabuzz.com/randydev99/tribe"),
        )
    ]
    keyboard = InlineKeyboardMarkup([links_row, donate_row])
    greeting = GEMINI_START_TEXT.format(name=message.from_user.mention)
    await message.reply_text(
        text=greeting,
        disable_web_page_preview=True,
        reply_markup=keyboard,
    )
|
|
|
@Client.on_message(
    ~filters.scheduled
    & filters.command(["onchat"])
    & ~filters.forwarded
)
async def addchatbot_user(client: Client, message: Message):
    """Enable the Gemini chatbot for the current chat (/onchat)."""
    target_chat = message.chat.id
    bot_id = client.me.id
    await db.add_chatbot(target_chat, bot_id)
    await message.reply_text("Added chatbot user")
|
|
|
@Client.on_message(
    ~filters.scheduled
    & filters.command(["offchat"])
    & ~filters.forwarded
)
async def rmchatbot_user(client: Client, message: Message):
    """Disable the Gemini chatbot for the current chat (/offchat)."""
    target_chat = message.chat.id
    await db.remove_chatbot(target_chat)
    await message.reply_text("ok stopped gemini")
|
|
|
async def _describe_uploaded_media(client, message, ai_reply, media_path, model_name):
    """Upload a downloaded media file to the Gemini Files API, ask *model_name*
    to describe it, reply with the answer, persist chat history, and clean up
    both the remote file and the local download.

    Shared by the audio/voice and video branches of chatbot_talk — they differ
    only in the model name and the downloaded path.
    """
    caption = message.caption or "What's this?"
    model = genai.GenerativeModel(
        model_name=model_name,
        # All harm categories explicitly unblocked, preserving the bot's
        # original safety configuration.
        safety_settings={
            genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
            genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
            genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
            genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
        }
    )
    backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
    backup_chat.append({"role": "user", "parts": [{"text": caption}]})
    # custom_loading is a premium-emoji prefix provided by `from config import *`.
    if client.me.is_premium:
        await ai_reply.edit_text(f"{custom_loading}Uploading file..")
    else:
        await ai_reply.edit_text("Uploading file..")
    media_file = genai.upload_file(path=media_path)
    # Gemini processes uploads asynchronously; poll until the state settles.
    while media_file.state.name == "PROCESSING":
        await asyncio.sleep(10)
        media_file = genai.get_file(media_file.name)
    if media_file.state.name == "FAILED":
        return await ai_reply.edit_text(f"Error: {media_file.state.name}")
    try:
        await client.send_chat_action(message.chat.id, enums.ChatAction.TYPING)
        await asyncio.sleep(1.5)
        response = model.generate_content(
            [media_file, caption],
            request_options={"timeout": 600}
        )
        if len(response.text) > 4096:
            # Telegram caps message length at 4096 chars; ship long answers
            # as a document instead.
            with open("chat.txt", "w+", encoding="utf8") as out_file:
                out_file.write(response.text)
            await message.reply_document(
                document="chat.txt",
                disable_notification=True
            )
            await ai_reply.delete()
            os.remove("chat.txt")
        else:
            await ai_reply.edit_text(response.text)
        backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
        await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
        await client.send_chat_action(message.chat.id, enums.ChatAction.CANCEL)
        media_file.delete()
        os.remove(media_path)
        return
    except InvalidArgument as e:
        return await ai_reply.edit_text(f"Error: {e}")
    except Exception as e:
        return await ai_reply.edit_text(f"Error: {e}")


async def _flux_generate_and_reply(message, status_msg, backup_chat, query_base, start_time, with_description=True):
    """Generate a flux-1-schnell image for *query_base*, caption it with a
    Gemini description (omitted when *with_description* is False, e.g. after
    a MediaCaptionTooLong), send it, and persist the description to history.
    """
    response_js = await js.image.create(
        "black-forest-labs/flux-1-schnell",
        image_read=True,
        params_data={"query": query_base},
    )
    file_path = "randydev.jpg"
    with open(file_path, "wb") as f:
        f.write(response_js)
    await status_msg.edit_text("Uploading image completed...")
    x = GeminiLatest(api_keys=GOOGLE_API_KEY)
    response_reads = x.get_response_image("What this?", file_path)
    execution_time = time.time() - start_time
    if with_description:
        caption = response_reads + f"\nExecution time `{execution_time:.2f}` seconds"
    else:
        # Caption without the description so it stays under Telegram's limit.
        caption = f"Execution time `{execution_time:.2f}` seconds"
    await message.reply_photo(
        file_path,
        caption=caption,
        progress=progress,
        progress_args=(
            status_msg,
            time.time(),
            "Uploading image completed..."
        )
    )
    backup_chat.append({"role": "model", "parts": [{"text": response_reads}]})
    await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
    await status_msg.delete()


@Client.on_message(
    filters.incoming
    & (
        filters.text
        | filters.photo
        | filters.video
        | filters.audio
        | filters.voice
        | filters.regex(r"\b(Randy|Rendi)\b(.*)", flags=re.IGNORECASE)
    )
    & (filters.private | filters.group)
    & ~filters.command(DISABLED_COMMNAD)
    & ~filters.bot
    & ~filters.via_bot
    & ~filters.forwarded,
    group=2,
)
async def chatbot_talk(client: Client, message: Message):
    """Main Gemini chatbot handler.

    Answers text, photo, video and audio/voice messages in chats where the
    chatbot was enabled via /onchat. Each media type gets its own branch;
    per-user chat history is persisted through ``db`` so Gemini keeps
    conversational context across messages.
    """
    genai.configure(api_key=GOOGLE_API_KEY)
    chat_user = await db.get_chatbot(message.chat.id)
    if not chat_user:
        # Chatbot not enabled in this chat.
        return
    if message.reply_to_message and message.reply_to_message.from_user:
        # In reply threads, only react when the user is replying to the bot.
        if message.reply_to_message.from_user.id != client.me.id:
            return

    if message.photo:
        await client.send_chat_action(message.chat.id, enums.ChatAction.UPLOAD_PHOTO)
        await asyncio.sleep(1.5)
        file_path = await message.download()
        caption = message.caption or "What's this?"
        x = GeminiLatest(api_keys=GOOGLE_API_KEY)
        # Initialized up front: the except handlers below reference ai_reply,
        # and the "remover" path can raise before it is ever assigned
        # (previously an UnboundLocalError masked the real error).
        ai_reply = None
        try:
            new_check_bg = re.findall(r"\bremover\b", caption)
            if caption == "remover":
                return await message.reply_text("i don't have, what do you mean by create remover?")
            if new_check_bg:
                new_js = await message.reply_text("Please wait, it's still being processed")
                response_path = await remove_bg_myapi(file_path)
                if not response_path:
                    return await new_js.edit_text("Error try remover bg")
                await new_js.edit_text("Uploading image completed...")
                await message.reply_photo(
                    response_path,
                    progress=progress,
                    progress_args=(
                        new_js,
                        time.time(),
                        "Uploading image completed..."
                    )
                )
                await new_js.delete()
                return
            ai_reply = await message.reply_text("Processing...")
            await client.send_chat_action(message.chat.id, enums.ChatAction.TYPING)
            await asyncio.sleep(1.5)
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            backup_chat.append({"role": "user", "parts": [{"text": caption}]})
            response_reads = x.get_response_image(caption, file_path)
            if len(response_reads) > 4096:
                # Too long for a Telegram message — send as a document.
                with open("chat.txt", "w+", encoding="utf8") as out_file:
                    out_file.write(response_reads)
                await message.reply_document(
                    document="chat.txt",
                    disable_notification=True
                )
                await ai_reply.delete()
                os.remove("chat.txt")
            else:
                await ai_reply.edit_text(response_reads)
            backup_chat.append({"role": "model", "parts": [{"text": response_reads}]})
            await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            await client.send_chat_action(message.chat.id, enums.ChatAction.CANCEL)
            os.remove(file_path)
            return
        except InvalidArgument as e:
            if ai_reply:
                return await ai_reply.edit_text(f"Error: {e}")
            return await message.reply_text(f"Error: {e}")
        except Exception as e:
            if ai_reply:
                return await ai_reply.edit_text(f"Error: {e}")
            return await message.reply_text(f"Error: {e}")

    if message.audio or message.voice:
        await client.send_chat_action(message.chat.id, enums.ChatAction.UPLOAD_AUDIO)
        await asyncio.sleep(1.5)
        if client.me.is_premium:
            ai_reply = await message.reply_text(f"{custom_loading}Processing...")
        else:
            ai_reply = await message.reply_text("Processing...")
        # Branch condition guarantees one of audio/voice is present.
        audio_file_name = await message.download()
        return await _describe_uploaded_media(
            client, message, ai_reply, audio_file_name, "gemini-1.5-flash"
        )

    if message.video:
        await client.send_chat_action(message.chat.id, enums.ChatAction.UPLOAD_VIDEO)
        await asyncio.sleep(1.5)
        if client.me.is_premium:
            ai_reply = await message.reply_text(f"{custom_loading}Processing...")
        else:
            ai_reply = await message.reply_text("Processing...")
        video_file_name = await message.download(file_name="newvideo.mp4")
        return await _describe_uploaded_media(
            client, message, ai_reply, video_file_name, "gemini-1.5-pro"
        )

    if message.text:
        start_time = time.time()
        await client.send_chat_action(message.chat.id, enums.ChatAction.TYPING)
        await asyncio.sleep(1.5)
        query = message.text.strip()
        # When addressed as "Randy"/"Rendi", answer the remainder of the
        # sentence; otherwise use the whole message.
        match = re.search(r"\b(Randy|Rendi)\b(.*)", query, flags=re.IGNORECASE)
        if match:
            rest_of_sentence = match.group(2).strip()
            query_base = rest_of_sentence if rest_of_sentence else query
        else:
            query_base = query
        new_check_flux_matches = re.findall(r"\bimage\b", query_base)
        if query_base == "image":
            return await message.reply_text("i don't have, what do you mean by image?")
        try:
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            if new_check_flux_matches:
                new_js = await message.reply_text("Please wait, it's still being processed")
                try:
                    backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
                    await _flux_generate_and_reply(message, new_js, backup_chat, query_base, start_time)
                    return
                except ImageProcessFailed:
                    # First attempt failed inside image processing — retry once.
                    backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
                    await new_js.edit_text("Please wait, Error try still processing...")
                    await _flux_generate_and_reply(message, new_js, backup_chat, query_base, start_time)
                    return
                except MediaCaptionTooLong:
                    # Description made the caption too long — retry without it.
                    backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
                    await new_js.edit_text("Please wait, caption too try still processing...")
                    await _flux_generate_and_reply(
                        message, new_js, backup_chat, query_base, start_time,
                        with_description=False
                    )
                    return
                except Exception as e:
                    LOGS.error(f"Error new_check_flux_matches {str(e)}")
                    return await new_js.edit_text("Try again error image")
            model_flash = genai.GenerativeModel(
                model_name="gemini-1.5-flash"
            )
            backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
            chat_session = model_flash.start_chat(history=backup_chat)
            response_data = chat_session.send_message(query_base)
            output = response_data.text
            if len(output) > 4096:
                with open("chat.txt", "w+", encoding="utf8") as out_file:
                    out_file.write(output)
                await message.reply_document(
                    document="chat.txt",
                    disable_notification=True
                )
                os.remove("chat.txt")
            else:
                await message.reply_text(output)
            backup_chat.append({"role": "model", "parts": [{"text": output}]})
            await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            await client.send_chat_action(message.chat.id, enums.ChatAction.CANCEL)
            return
        except Exception as e:
            return await message.reply_text(f"Error: {e}")
|
|