pmkhanh7890 committed on
Commit
0542c93
·
1 Parent(s): 1ce1659

add requirements

Browse files
requirements.txt CHANGED
@@ -12,6 +12,12 @@ lxml-html-clean==0.4.1
12
  # Text
13
  openai==1.55.3
14
  newspaper4k==0.9.3.1
 
 
 
 
 
 
15
 
16
  # Images
17
  pillow==10.1.0
 
12
  # Text
13
  openai==1.55.3
14
  newspaper4k==0.9.3.1
15
+ transformers
16
+ scikit-learn
17
+ nltk
18
+ numpy
19
+ torch
20
+ sentence-transformers
21
 
22
  # Images
23
  pillow==10.1.0
src/application/content_detection.py CHANGED
@@ -1,5 +1,3 @@
1
-
2
-
3
  from src.application.text.model_detection import detect_by_ai_model
4
  from src.application.text.search_detection import check_human, detect_by_relative_search
5
 
 
 
 
1
  from src.application.text.model_detection import detect_by_ai_model
2
  from src.application.text.search_detection import check_human, detect_by_relative_search
3
 
src/application/content_generation.py CHANGED
@@ -1,7 +1,6 @@
1
  import openai
2
  from dotenv import load_dotenv
3
  import os
4
- import re
5
 
6
  load_dotenv()
7
  AZURE_OPENAI_API_KEY = os.getenv('AZURE_OPENAI_API_KEY')
 
1
  import openai
2
  from dotenv import load_dotenv
3
  import os
 
4
 
5
  load_dotenv()
6
  AZURE_OPENAI_API_KEY = os.getenv('AZURE_OPENAI_API_KEY')
src/application/text/search.py CHANGED
@@ -4,7 +4,7 @@ import string
4
  import requests
5
  from dotenv import load_dotenv
6
  from nltk.corpus import stopwords
7
- from nltk.tokenize import sent_tokenize, word_tokenize
8
  from sklearn.feature_extraction.text import TfidfVectorizer
9
 
10
  from src.application.text.identity import extract_entities
 
4
  import requests
5
  from dotenv import load_dotenv
6
  from nltk.corpus import stopwords
7
+ from nltk.tokenize import word_tokenize
8
  from sklearn.feature_extraction.text import TfidfVectorizer
9
 
10
  from src.application.text.identity import extract_entities