Alejadro Sanchez-Giraldo committed on
Commit
26db21a
·
1 Parent(s): 0a1e0cd

Simple implementation of flags

Browse files
Files changed (3) hide show
  1. README.md +2 -1
  2. app.py +28 -11
  3. requirements.txt +2 -1
README.md CHANGED
@@ -32,7 +32,8 @@ source dschatbot/bin/activate
32
  2. Run the chatbot:
33
 
34
  ```bash
35
- python app.py
 
36
  ```
37
 
38
  ## Usage
 
32
  2. Run the chatbot:
33
 
34
  ```bash
35
+ source .env
36
+ sdkKEY=${sdkKEY} python app.py
37
  ```
38
 
39
  ## Usage
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import logging
3
  import gradio as gr
@@ -5,6 +6,12 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
5
  import torch
6
  import uuid
7
  import time
 
 
 
 
 
 
8
 
9
 
10
  def capture_logs(log_body, log_file, uuid_label):
@@ -35,6 +42,24 @@ print("CUDA available: ", torch.cuda.is_available())
35
  print("MPS available: ", torch.backends.mps.is_available())
36
 
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  tokenizer = AutoTokenizer.from_pretrained(
39
  "deepseek-ai/deepseek-coder-1.3b-instruct", trust_remote_code=True)
40
  model = AutoModelForCausalLM.from_pretrained(
@@ -49,17 +74,8 @@ device = torch.device(
49
  "mps") if torch.backends.mps.is_available() else torch.device("cpu")
50
  model = model.to(device)
51
 
52
- # Theme builder
53
- # gr.themes.builder()
54
-
55
- theme = gr.themes.Soft(
56
- primary_hue="sky",
57
- neutral_hue="slate",
58
- )
59
 
60
  # Function to handle user input and generate a response
61
-
62
-
63
  def chatbot_response(query, tokens, top_k, top_p):
64
  uuid_label = str(uuid.uuid4())
65
 
@@ -83,9 +99,10 @@ def chatbot_response(query, tokens, top_k, top_p):
83
  outputs[0][len(inputs[0]):], skip_special_tokens=True)
84
 
85
  end_time = time.time() # End timer
86
- performance_time = round(end_time - start_time, 2)
87
 
88
- log_body = 'query: %s, pocessTime: %s, tokens: %s, top_k: %s, top_p: %s' % (query, performance_time, tokens, top_k, top_p)
 
89
 
90
  capture_logs(uuid_label, 'query_logs.csv', log_body)
91
 
 
1
+ from ldclient import Context
2
  import os
3
  import logging
4
  import gradio as gr
 
6
  import torch
7
  import uuid
8
  import time
9
+ import ldclient
10
+
11
+ from ldclient.config import Config
12
+
13
+ ldclient.set_config(Config(os.environ.get('sdkKEY')))
14
+ client = ldclient.get()
15
 
16
 
17
  def capture_logs(log_body, log_file, uuid_label):
 
42
  print("MPS available: ", torch.backends.mps.is_available())
43
 
44
 
45
+ context = Context.builder("huggie-face") \
46
+ .set("application", "deepSeekChatbot") \
47
+ .build()
48
+
49
+ flag_value = client.variation("themeColors", context, False)
50
+ if flag_value:
51
+ print("Feature flag on")
52
+ theme = gr.themes.Soft(
53
+ primary_hue="fuchsia",
54
+ neutral_hue="blue",
55
+ )
56
+ else:
57
+ print("Feature flag off")
58
+ theme = gr.themes.Soft(
59
+ primary_hue="sky",
60
+ neutral_hue="slate",
61
+ )
62
+
63
  tokenizer = AutoTokenizer.from_pretrained(
64
  "deepseek-ai/deepseek-coder-1.3b-instruct", trust_remote_code=True)
65
  model = AutoModelForCausalLM.from_pretrained(
 
74
  "mps") if torch.backends.mps.is_available() else torch.device("cpu")
75
  model = model.to(device)
76
 
 
 
 
 
 
 
 
77
 
78
  # Function to handle user input and generate a response
 
 
79
  def chatbot_response(query, tokens, top_k, top_p):
80
  uuid_label = str(uuid.uuid4())
81
 
 
99
  outputs[0][len(inputs[0]):], skip_special_tokens=True)
100
 
101
  end_time = time.time() # End timer
102
+ performance_time = round(end_time - start_time, 2)
103
 
104
+ log_body = 'query: %s, pocessTime: %s, tokens: %s, top_k: %s, top_p: %s' % (
105
+ query, performance_time, tokens, top_k, top_p)
106
 
107
  capture_logs(uuid_label, 'query_logs.csv', log_body)
108
 
requirements.txt CHANGED
@@ -5,4 +5,5 @@ transformers
5
  minijinja
6
  torch --extra-index-url https://download.pytorch.org/whl/cu118
7
  torchvision
8
- torchaudio
 
 
5
  minijinja
6
  torch --extra-index-url https://download.pytorch.org/whl/cu118
7
  torchvision
8
+ torchaudio
9
+ launchdarkly-server-sdk