import os
import random
import re
from functools import lru_cache

import gradio as gr
import joblib
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
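
# Expected runtime dependencies (unpinned sketch): gradio, torch, transformers,
# huggingface_hub, joblib, scikit-learn (for the LabelEncoder fallback) and
# numpy; pin exact versions in requirements.txt as needed.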

# Load the model, tokenizer and label encoder from the Hugging Face Hub.
# Cached with lru_cache so the slow download/initialisation runs once,
# not on every Gradio request.
@lru_cache(maxsize=1)
def load_model_from_hf(model_id="jersonalvr/random"):
    # Create a cache directory
    cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "random_number_generator")
    os.makedirs(cache_dir, exist_ok=True)

    # Download tokenizer
    tokenizer = DistilBertTokenizer.from_pretrained(model_id, cache_dir=cache_dir)
    
    # Download model
    model = DistilBertForSequenceClassification.from_pretrained(model_id, cache_dir=cache_dir)
    
    # Download label encoder
    try:
        label_encoder_path = hf_hub_download(
            repo_id=model_id, 
            filename="label_encoder.joblib",
            cache_dir=cache_dir
        )
        label_encoder = joblib.load(label_encoder_path)
    except Exception as e:
        print(f"Error downloading label encoder: {e}")
        # Fallback: build a LabelEncoder by hand. classes_ must be a numpy
        # array, otherwise inverse_transform() fails on array indexing.
        from sklearn.preprocessing import LabelEncoder
        label_encoder = LabelEncoder()
        label_encoder.classes_ = np.array([
            'generar_numero_unico', 'generar_numero_digitos',
            'generar_numeros_rango', 'generar_numeros_sin_rango'
        ])
    
    return model, tokenizer, label_encoder
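
# Note: the model repo is assumed to ship the usual transformers files
# (config, weights, tokenizer) plus a fitted "label_encoder.joblib"; the
# hard-coded fallback above assumes four intent labels in the order they were
# encoded at training time, which may not match the real encoder.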

# Function to predict intent
def predict_intent(prompt, model, tokenizer, label_encoder):
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        max_length=32,
        truncation=True,
        padding='max_length'
    )

    with torch.no_grad():
        logits = model(**inputs).logits
    
    pred_id = torch.argmax(logits, dim=1).item()
    intent = label_encoder.inverse_transform([pred_id])[0]
    return intent

# Heuristic parameter extraction from the (Spanish) prompt
def extract_parameters(prompt):
    # Defaults: one number anywhere between 0 and 9999
    params = {
        "count": 1,
        "min": 0,
        "max": 9999
    }

    prompt_lower = prompt.lower()

    # How many numbers were requested (digits or spelled out in Spanish)
    count_words = [
        (1, ["un número", "un numero"]),
        (2, ["2 números", "2 numeros", "dos números", "dos numeros"]),
        (3, ["3 números", "3 numeros", "tres números", "tres numeros"]),
        (4, ["4 números", "4 numeros", "cuatro números", "cuatro numeros"]),
        (5, ["5 números", "5 numeros", "cinco números", "cinco numeros"]),
        (10, ["10 números", "10 numeros", "diez números", "diez numeros"]),
    ]
    for count, words in count_words:
        if any(word in prompt_lower for word in words):
            params["count"] = count
            break

    # Ranges implied by a requested number of digits
    ranges = [
        (0, 9, "un dígito", "un digito"),
        (10, 99, "dos dígitos", "dos digitos"),
        (100, 999, "tres dígitos", "tres digitos"),
        (1000, 9999, "cuatro dígitos", "cuatro digitos"),
    ]
    for min_val, max_val, *range_words in ranges:
        if any(word in prompt_lower for word in range_words):
            params["min"] = min_val
            params["max"] = max_val
            break

    # Explicit ranges such as "entre 1 y 10"
    range_match = re.search(r'entre\s+(-?\d+)\s+y\s+(-?\d+)', prompt_lower)
    if range_match:
        params["min"] = int(range_match.group(1))
        params["max"] = int(range_match.group(2))

    return params
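
# Example (illustrative): extract_parameters("Genera 3 números entre 1 y 10")
# returns {"count": 3, "min": 1, "max": 10}.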

# Function to generate numbers
def generate_numbers(intent_params, distinct=False):
    count = intent_params["count"]
    min_val = intent_params["min"]
    max_val = intent_params["max"]
    
    # Sample without replacement when distinct values are requested and the
    # range is large enough to supply them; otherwise fall back to independent
    # draws, which may repeat.
    if distinct and count <= (max_val - min_val + 1):
        return random.sample(range(min_val, max_val + 1), count)
    else:
        return [random.randint(min_val, max_val) for _ in range(count)]
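
# Example (illustrative): generate_numbers({"count": 3, "min": 1, "max": 10},
# distinct=True) might return [7, 2, 9] (three distinct values in [1, 10]).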

# Predefined example prompts
EXAMPLE_PROMPTS = [
    "Dame un número de dos dígitos",
    "Genera 3 números entre 1 y 10",
    "Necesito un número aleatorio",
    "Dame 5 números de tres dígitos",
    "Quiero 2 números entre 100 y 200"
]

def number_generator(prompt, distinct):
    # Load model and utilities (cached after the first call by lru_cache)
    model, tokenizer, label_encoder = load_model_from_hf()
    
    # Predict intent
    intent = predict_intent(prompt, model, tokenizer, label_encoder)
    
    # Extract parameters intelligently
    intent_params = extract_parameters(prompt)
    
    # Generate numbers
    numbers = generate_numbers(intent_params, distinct)
    
    return {
        "Prompt": prompt,
        "Intent": intent,
        "Parameters": intent_params,
        "Generated Numbers": numbers
    }

# Create Gradio interface
def create_gradio_app():
    iface = gr.Interface(
        fn=number_generator,
        inputs=[
            gr.Textbox(label="Enter your prompt"),
            gr.Checkbox(label="Distinct Numbers", value=False)
        ],
        outputs=[
            gr.JSON(label="Result"),
        ],
        title="Random Number Generator with Intent Classification",
        description="Generate numbers based on your natural language prompt",
        examples=[[prompt, False] for prompt in EXAMPLE_PROMPTS],
        theme="default"
    )
    
    return iface

# Launch the app
if __name__ == "__main__":
    app = create_gradio_app()
    app.launch(share=True)