Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,5 @@
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
import torch
|
3 |
import torch.nn.functional as F
|
@@ -6,59 +8,15 @@ import re
|
|
6 |
import math
|
7 |
import logging
|
8 |
|
9 |
-
# ---
|
10 |
-
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
AI_WEIGHTS =
|
13 |
-
|
14 |
-
'potential': 0.4900, 'features': 0.4824, 'various': 0.4785, 'regarding': 0.4689,
|
15 |
-
'remains': 0.4403, 'featuring': 0.4031, 'experience': 0.4026, 'including': 0.3963,
|
16 |
-
'challenges': 0.3548, 'allowing': 0.3528, 'enhance': 0.3437, 'aims': 0.3238,
|
17 |
-
'leading': 0.3064, 'user': 0.3054, 'recent': 0.2717, 'concerns': 0.2707,
|
18 |
-
'capabilities': 0.2684, 'technology': 0.2636, 'devices': 0.2616, 'following': 0.2551,
|
19 |
-
'anticipated': 0.2484, 'unique': 0.2418, 'expressed': 0.2403, 'innovative': 0.2383,
|
20 |
-
'design': 0.2379, 'remain': 0.2371, 'previous': 0.2331, 'priced': 0.2325,
|
21 |
-
'launch': 0.2324, 'enhancing': 0.2319, 'showcasing': 0.2305, 'feature': 0.2288,
|
22 |
-
'particularly': 0.2287, 'set': 0.2228, 'aimed': 0.2216, 'highlighted': 0.2192,
|
23 |
-
'ongoing': 0.2188, 'access': 0.2182, 'available': 0.2159, 'alongside': 0.2144,
|
24 |
-
'introduced': 0.2133, 'previously': 0.2122, 'highlighting': 0.2113, 'models': 0.2081,
|
25 |
-
'faced': 0.2057, 'platforms': 0.2055, 'updates': 0.2037, 'offers': 0.2032,
|
26 |
-
'significantly': 0.2027, 'issues': 0.2021, 'emphasized': 0.1977, 'initially': 0.1955,
|
27 |
-
'content': 0.1926, 'emphasizing': 0.1924, 'options': 0.1874, 'performance': 0.1864,
|
28 |
-
'initial': 0.1832, 'notable': 0.1821, 'additional': 0.1812, 'individuals': 0.1804,
|
29 |
-
'initiative': 0.1802, 'enhanced': 0.1797, 'release': 0.1797, 'currently': 0.1790,
|
30 |
-
'traditional': 0.1769, 'future': 0.1731, 'expected': 0.1725, 'applications': 0.1707,
|
31 |
-
'indicating': 0.1699, 'notably': 0.1658, 'insights': 0.1656, 'noted': 0.1645,
|
32 |
-
'players': 0.1645, 'narrative': 0.1642, 'landscape': 0.1640, 'upcoming': 0.1634,
|
33 |
-
'providing': 0.1631, 'offering': 0.1615, 'enabling': 0.1610, 'gaming': 0.1595,
|
34 |
-
'compared': 0.1553, 'indicated': 0.1539, 'extensive': 0.1530, 'approach': 0.1521,
|
35 |
-
'allows': 0.1519, 'stated': 0.1519, 'development': 0.1515, 'commitment': 0.1495,
|
36 |
-
'highlights': 0.1493, 'essential': 0.1483, 'experiences': 0.1480, 'recently': 0.1471,
|
37 |
-
'suggesting': 0.1457, 'market': 0.1447, 'uncertain': 0.1440, 'potentially': 0.1433
|
38 |
-
}
|
39 |
-
|
40 |
-
OG_WEIGHTS = {
|
41 |
-
'says': 1.0000, 'just': 0.9623, 'people': 0.8774, 'said': 0.8259, 'company': 0.7645,
|
42 |
-
'll': 0.6372, 'make': 0.6237, 'time': 0.5634, 'way': 0.5374, 've': 0.5039,
|
43 |
-
'want': 0.4435, 'like': 0.4426, 'don': 0.4338, 'going': 0.4160, 'really': 0.4126,
|
44 |
-
'use': 0.3769, 'good': 0.3718, 'lot': 0.3710, 'able': 0.3611, 'things': 0.3595,
|
45 |
-
'big': 0.3483, 'doesn': 0.3470, 'right': 0.3453, 'work': 0.3443, 'new': 0.3381,
|
46 |
-
'know': 0.3355, 'think': 0.3218, 'today': 0.3209, 'isn': 0.3039, 'look': 0.3013,
|
47 |
-
'world': 0.2907, 'say': 0.2875, 'best': 0.2825, 'used': 0.2758, 'little': 0.2735,
|
48 |
-
'actually': 0.2724, 'phone': 0.2551, 'thing': 0.2477, 'year': 0.2461, 'come': 0.2328,
|
49 |
-
'told': 0.2315, 'far': 0.2250, 'better': 0.2245, 'didn': 0.2244, 'getting': 0.2209,
|
50 |
-
'help': 0.2193, 'makes': 0.2141, 'got': 0.2139, 'won': 0.2096, 'called': 0.2078,
|
51 |
-
'different': 0.2010, 'verge': 0.2001, 'game': 0.1956, 'looks': 0.1954, 'comes': 0.1953,
|
52 |
-
'years': 0.1935, 'working': 0.1924, 'kind': 0.1899, 'let': 0.1891, 'great': 0.1878,
|
53 |
-
'read': 0.1876, 'number': 0.1868, 'long': 0.1852, 'according': 0.1795, 'coming': 0.1784,
|
54 |
-
'day': 0.1750, 'pretty': 0.1734, 'looking': 0.1685, 'bit': 0.1682, 'place': 0.1677,
|
55 |
-
'start': 0.1667, 'trying': 0.1661, 'sure': 0.1655, 'means': 0.1642, 'course': 0.1641,
|
56 |
-
'week': 0.1637, 'story': 0.1610, 'buy': 0.1589, 'probably': 0.1581, 'play': 0.1561,
|
57 |
-
'using': 0.1554, 'doing': 0.1551, 'hard': 0.1525, 'did': 0.1509, 'money': 0.1497,
|
58 |
-
'point': 0.1472, 'idea': 0.1429, 'end': 0.1425, 'aren': 0.1396, 'fact': 0.1371,
|
59 |
-
'run': 0.1363, 'does': 0.1362, 'case': 0.1331, 'built': 0.1301, 'biggest': 0.1300,
|
60 |
-
'started': 0.1286, 'exactly': 0.1279, 'screen': 0.1277, 'deal': 0.1264, 'apps': 0.1234
|
61 |
-
}
|
62 |
|
63 |
def tokenize(text):
    """Return all lowercase alphabetic tokens of length >= 2 found in *text*."""
    lowered = text.lower()
    return re.findall(r'\b[a-z]{2,}\b', lowered)
|
@@ -80,7 +38,6 @@ def classify_text_likelihood(text: str) -> float:
|
|
80 |
net = ai_score - og_score
|
81 |
return 1 / (1 + math.exp(-SIGMOID_K * net))
|
82 |
|
83 |
-
# Wrap words in thick colored underlines based on heuristic
|
84 |
def highlight_heuristic_words(text: str) -> str:
|
85 |
parts = re.split(r'(\b[a-z]{2,}\b)', text)
|
86 |
out = []
|
@@ -168,7 +125,6 @@ if st.button("Classify", type="primary"):
|
|
168 |
probs = F.softmax(logits, dim=-1).cpu()
|
169 |
preds = torch.argmax(probs, dim=-1).cpu()
|
170 |
|
171 |
-
# Highlight each sentence and underline heuristic words
|
172 |
chunks = []
|
173 |
for i, s in enumerate(sentences):
|
174 |
inner = highlight_heuristic_words(s)
|
@@ -183,7 +139,6 @@ if st.button("Classify", type="primary"):
|
|
183 |
chunks.append(span)
|
184 |
st.markdown("".join(chunks), unsafe_allow_html=True)
|
185 |
|
186 |
-
# Scores
|
187 |
avg = torch.mean(probs, dim=0)
|
188 |
model_ai = avg[0].item()
|
189 |
heuristic_ai = classify_text_likelihood(text)
|
@@ -191,4 +146,4 @@ if st.button("Classify", type="primary"):
|
|
191 |
|
192 |
st.subheader(f"🤖 Model AI Likelihood: {model_ai*100:.1f}%")
|
193 |
st.subheader(f"🛠️ Heuristic AI Likelihood: {heuristic_ai*100:.1f}%")
|
194 |
-
st.subheader(f"⚖️ Combined AI Likelihood: {combined*100:.1f}%")
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
import streamlit as st
|
4 |
import torch
|
5 |
import torch.nn.functional as F
|
|
|
8 |
import math
|
9 |
import logging
|
10 |
|
11 |
+
# --- Load heuristic weights from environment secrets ---
|
12 |
+
@st.cache_resource
def load_heuristic_weights():
    """Load the AI/OG heuristic word-weight dictionaries from environment secrets.

    Returns:
        tuple[dict, dict]: (AI_WEIGHTS, OG_WEIGHTS) parsed from the
        ``AI_WEIGHTS_JSON`` and ``OG_WEIGHTS_JSON`` environment variables.

    Raises:
        KeyError: if a required secret is missing (with a message naming it,
            instead of the bare key that ``os.environ[...]`` would report).
        json.JSONDecodeError: if a secret is not valid JSON.
    """
    def _load(var):
        # os.environ.get + explicit raise so a misconfigured Space fails
        # with an actionable message rather than an opaque KeyError('AI_WEIGHTS_JSON').
        raw = os.environ.get(var)
        if raw is None:
            raise KeyError(
                f"Missing required environment secret {var!r}; "
                "set it in the Space settings."
            )
        return json.loads(raw)

    return _load("AI_WEIGHTS_JSON"), _load("OG_WEIGHTS_JSON")
|
17 |
|
18 |
+
AI_WEIGHTS, OG_WEIGHTS = load_heuristic_weights()
|
19 |
+
SIGMOID_K = 0.5
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
def tokenize(text):
    """Extract the word tokens used by the heuristic: 2+ lowercase letters."""
    word_pattern = r'\b[a-z]{2,}\b'
    return re.findall(word_pattern, text.lower())
|
|
|
38 |
net = ai_score - og_score
|
39 |
return 1 / (1 + math.exp(-SIGMOID_K * net))
|
40 |
|
|
|
41 |
def highlight_heuristic_words(text: str) -> str:
|
42 |
parts = re.split(r'(\b[a-z]{2,}\b)', text)
|
43 |
out = []
|
|
|
125 |
probs = F.softmax(logits, dim=-1).cpu()
|
126 |
preds = torch.argmax(probs, dim=-1).cpu()
|
127 |
|
|
|
128 |
chunks = []
|
129 |
for i, s in enumerate(sentences):
|
130 |
inner = highlight_heuristic_words(s)
|
|
|
139 |
chunks.append(span)
|
140 |
st.markdown("".join(chunks), unsafe_allow_html=True)
|
141 |
|
|
|
142 |
avg = torch.mean(probs, dim=0)
|
143 |
model_ai = avg[0].item()
|
144 |
heuristic_ai = classify_text_likelihood(text)
|
|
|
146 |
|
147 |
st.subheader(f"🤖 Model AI Likelihood: {model_ai*100:.1f}%")
|
148 |
st.subheader(f"🛠️ Heuristic AI Likelihood: {heuristic_ai*100:.1f}%")
|
149 |
+
st.subheader(f"⚖️ Combined AI Likelihood: {combined*100:.1f}%")
|