File size: 3,906 Bytes
0013d95
bb7af57
4f8f8b7
bb7af57
0013d95
bb7af57
 
e2409e8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0013d95
3dacc3e
 
 
 
 
 
 
 
 
 
 
 
bb7af57
1a81701
 
 
3dacc3e
 
 
 
 
 
 
 
 
 
 
 
 
bb7af57
3dacc3e
bb7af57
 
3dacc3e
bb7af57
 
 
 
4f8f8b7
1a81701
 
 
 
 
4f8f8b7
 
 
 
e6c597c
 
 
 
1a81701
bb7af57
4f8f8b7
1a81701
4f8f8b7
0013d95
 
bb7af57
 
 
4f8f8b7
 
 
 
 
 
 
 
 
 
 
 
e2409e8
 
 
bb7af57
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import gradio as gr
import openai
from constants import *


# Configure the OpenAI client with the key loaded from constants.py.
openai.api_key = OPENAI_API_KEY

# UI copy shown in the Gradio interface header / footer.
title = "Car Seats Voice Commands"

description = """
This is a demo for controlling car seats with Voice Commands, On the left there's the inputs section
and on the right you'll find your outputs. For the inputs you have two choices **Voice** and **Text**,
Use **Voice** If you want a closer experience to the final product, Or use **Text** if you just want to test the command model.
for the outputs you have the **transcription**(Please check that it's accurate), **command**(to know which
command the system detected) and you have the robot voice (again use this if you want a more real experience).

**Features** : You can either activate or deactivate the following features
- Heated Seats
- Cooled Seats
- Massage Seats

Examples:
- **Direct Commands** : Try to say something like "Activate heated seats" or "Turn Off massage seats"
- **Indirect Commands** : Try "My back is cold" , "No heating is needed anymore" or "I'm stressed today"
"""

article = """
This demo processes commands in two steps, the first step is the transcription phase and the second is the
Command Classification phase. For Transcription I used The OpenAi whisper model, and for the classification 
I Fine-Tuned the OpenAi **ada** model on Car Seats Command.
"""


def get_command(command, model, id2label):
    """
    Classify a user utterance into one of the seat-control commands.

    Sends a zero-shot classification prompt to the OpenAI Completions API
    asking for the numeric code of the feature to use, then maps that code
    back to a label via ``id2label``.

    NOTE(review): the ``model`` argument is currently ignored — the call
    hardcodes "text-davinci-003" (an earlier revision used a fine-tuned
    model passed in here). The parameter is kept for interface
    compatibility with existing callers.

    Parameters
    ----------
    command : str
        The user's command text (typed or transcribed).
    model : str
        Model identifier (currently unused, see NOTE above).
    id2label : dict[int, str]
        Mapping from the numeric command code to its label string.

    Returns
    -------
    str
        The matched label, or "unknown" when the model's answer is not a
        valid code.
    """
    prompt = f"""
    We want to control the seats of a car which has features to cool, heat, or massage a seat. The user said "{command}", Which feature we should use to ensure user comfort? Give just the number of the feature.
    Mapping:
    1: "massage_seats_on"
    2: "massage_seats_off"
    3: "heated_seats_on"
    4: "heated_seats_off"
    5: "cooled_seats_on"
    6: "cooled_seats_off"

    Command_Code:
    """

    completion = openai.Completion.create(
        model="text-davinci-003", prompt=prompt, max_tokens=2, temperature=0
    )

    # The model is expected to answer with a bare number, but it may emit
    # arbitrary text; treat a non-numeric answer the same as an unmapped id.
    try:
        label_id = int(completion["choices"][0]["text"].strip())
    except (ValueError, KeyError, IndexError):
        return "unknown"

    return id2label[label_id] if label_id in id2label else "unknown"


def transcribe(audio, text):
    """
    Gradio handler: turn a voice or text command into a machine response.

    If ``text`` is provided it is classified directly; otherwise ``audio``
    (a filepath from the microphone component) is transcribed with Whisper
    and the transcription is classified.

    Parameters
    ----------
    audio : str | None
        Path to the recorded audio file, or None when text is used.
    text : str
        Optional typed command; takes priority over audio.

    Returns
    -------
    tuple[str, str, Any]
        (transcription, text response, audio response or None).
    """
    if text:
        result = get_command(text, MODEL, id2label)
        return "Text provided by the user", text_respnses[result], None

    # Transcribe the recorded audio; `with` guarantees the file handle is
    # closed even if the API call raises.
    with open(audio, "rb") as audio_file:
        transcription = openai.Audio.transcribe("whisper-1", audio_file, language="en")
    transcription = transcription["text"]

    result = get_command(transcription, MODEL, id2label)

    # `resoponses` maps labels to audio-generating callables; guard against
    # labels with no entry (e.g. "unknown") instead of calling None.
    response_fn = resoponses.get(result)
    audio_res = response_fn() if response_fn is not None else None

    return transcription, text_respnses[result], audio_res


if __name__ == "__main__":
    # Assemble the Gradio UI: microphone/text inputs on the left,
    # transcription plus text/audio responses on the right.
    voice_in = gr.Audio(label="", source="microphone", type="filepath")
    text_in = gr.Textbox(label="If you prefer type your command (more accurate)")

    transcription_out = gr.Textbox(
        label="Input Transcription (Please check that this matches what you've said)"
    )
    text_out = gr.Textbox(label="Machine Response (Text Version)")
    audio_out = gr.Audio(label="Machine Response (Audio Version)")

    demo = gr.Interface(
        fn=transcribe,
        inputs=[voice_in, text_in],
        outputs=[transcription_out, text_out, audio_out],
        allow_flagging="auto",
        title=title,
        description=description,
        article=article,
    )
    demo.launch()