from huggingface_hub import InferenceClient
import re
from pymongo import MongoClient
import os

uri = f"mongodb+srv://{os.getenv('mongo_secret')}@void-uep.guig8vk.mongodb.net/?retryWrites=true&w=majority"
mongo_client = MongoClient(uri)  # module-level MongoDB client for API key lookups
db = mongo_client["ImagiGen"]
admin_collection = db["Admin"]

def clean_generated_text(text):
    # Remove asterisks (e.g., **text** or *text*)
    text = re.sub(r"\*+", "", text)

    # Remove special characters (except common punctuation and alphanumeric)
    text = re.sub(r'[^a-zA-Z0-9 .,!?\'"-]', "", text)

    # Normalize multiple spaces into a single space
    text = re.sub(r"\s+", " ", text).strip()
    return text


def generate_prompt_response(api_key, model_name, user_message, max_tokens=1000):
    client = InferenceClient(provider="hf-inference", api_key=api_key)
    messages = [{"role": "user", "content": user_message}]

    # Generate the completion response
    stream = client.chat.completions.create(
        model=model_name, messages=messages, max_tokens=max_tokens, stream=True
    )

    # Collect the streamed response, skipping chunks that carry no text content
    response = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            response += delta
    return clean_generated_text(response)


def Qwen_72b(user_input):
    api_key = admin_collection.find_one({"model": "qwen"})["key"] 
    model_name = "Qwen/Qwen2.5-72B-Instruct"
    user_input = f'''You are an expert in generating detailed, highly descriptive, and context-rich prompts specifically for fashion image generation. Refine the given prompt into a detailed description suitable for diffusion-based image generation models. Expand the user input by adding attributes like clothing type, fabric, color scheme, patterns, design elements, body posture, background scenery, environment, and accessories. Maintain the user’s core intent and theme. Be highly expressive, visual, and vivid in your language. Output only the final refined prompt. Do not add any explanation or extra text. No headings, no special characters, just the pure refined prompt ready for image generation.
    Refine this prompt: {user_input}'''
    response = generate_prompt_response(api_key, model_name, user_message=user_input)
    return clean_generated_text(response)


def Mixtral(user_input):
    api_key = admin_collection.find_one({"model": "mixtral"})["key"] 
    model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    user_input = f'''You are an expert fashion stylist and creative prompt engineer. Refine the given user prompt into a structured, artistic, and creative fashion-specific prompt suitable for text-to-image generation models. You should enhance the style, mood, and visual appeal of the description while retaining the original idea. Include additional elements like style inspirations, artistic mood, creative design variations, environment aesthetics, and accessories. Use imaginative yet clear language. Output only the refined prompt. Avoid any extra commentary or meta-text. The output must be the prompt only, nothing else.
    Refine this prompt: {user_input}'''
    response = generate_prompt_response(api_key, model_name, user_message=user_input)
    return clean_generated_text(response)


def microsoft_phi(user_input):
    api_key = admin_collection.find_one({"model": "phi"})["key"] 
    model_name = "microsoft/Phi-3-mini-4k-instruct"
    user_input = f'''You are a precise and efficient prompt engineer specialized in fashion description generation. Refine the user prompt into a clean, well-structured, and concise fashion image generation prompt. Focus on clarity, accuracy, and inclusion of essential details like garment type, color, pattern, material, setting, and design attributes. Keep the prompt easy to understand and straightforward for image generation models. Strictly output only the final refined prompt. No extra information, no explanations, no special characters — only the refined prompt.
    Refine this prompt: {user_input}'''
    response = generate_prompt_response(api_key, model_name, user_message=user_input)
    return clean_generated_text(response)
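

# Example usage: a minimal sketch, assuming the MongoDB "Admin" collection holds
# valid Hugging Face API keys for "qwen", "mixtral", and "phi", and that the
# "mongo_secret" environment variable is set. The sample prompt is illustrative only.
if __name__ == "__main__":
    sample_prompt = "a red summer dress"
    print(Qwen_72b(sample_prompt))
    # Mixtral and microsoft_phi expose the same interface:
    # print(Mixtral(sample_prompt))
    # print(microsoft_phi(sample_prompt))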