import json
import os

import openai
from dotenv import load_dotenv

load_dotenv()

AZURE_OPENAI_API_KEY = os.getenv('AZURE_OPENAI_API_KEY')
AZURE_OPENAI_ENDPOINT = os.getenv('AZURE_OPENAI_ENDPOINT')
AZURE_OPENAI_API_VERSION = os.getenv('AZURE_OPENAI_API_VERSION')

client = openai.AzureOpenAI(
    api_version=AZURE_OPENAI_API_VERSION,
    api_key=AZURE_OPENAI_API_KEY,
    azure_endpoint=AZURE_OPENAI_ENDPOINT,
)
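
# A minimal sketch of the .env file this module expects. The variable names
# come from the os.getenv() calls above; the endpoint and API version values
# below are placeholders, not values taken from this project:
#
#   AZURE_OPENAI_API_KEY=<your-azure-openai-key>
#   AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
#   AZURE_OPENAI_API_VERSION=2024-02-01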

def generate_fake_text(text_generation_model, title, content):
    # Build the prompt for the selected chat model
    prompt = """Generate a random fake news title in this format:
---
# Title: [Fake Title]
# Content:
[Fake Content]
---
"""
    if title and content:
        prompt += f"""based on the following context:
# Title: {title}\n# Content: {content}"""
    elif title:
        prompt += f"""based on the following context:
# Title: {title}\n"""
    elif content:
        prompt += f"""based on the following context:
# Content: {content}"""

    # Generate text using the selected model
    try:
        response = client.chat.completions.create(
            model=text_generation_model,
            messages=[{"role": "system", "content": prompt}],
        )
        print("Response from OpenAI API: ", response.choices[0].message.content)
        fake_text = response.choices[0].message.content
    except openai.OpenAIError as e:
        print(f"Error interacting with OpenAI API: {e}")
        fake_text = ""

    if fake_text == "":
        return "", ""
    fake_title, fake_content = extract_title_content(fake_text)
    return fake_title, fake_content
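
# Example usage (a sketch; "gpt-4o" stands in for whatever Azure OpenAI chat
# deployment name you have configured, it is not defined in this module):
#
#   fake_title, fake_content = generate_fake_text(
#       "gpt-4o", "Original headline", "Original article body"
#   )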

def extract_title_content(fake_news):
    """
    Extracts the title and content from the generated fake news string.

    This function parses a string containing fake news, which is expected to
    have a specific format with a title and content section marked by
    '# Title:' and '# Content:' respectively.

    Args:
        fake_news (str): A string containing the generated fake news in the expected format.

    Returns:
        tuple: A tuple containing two elements:
            - title (str): The extracted title of the fake news.
            - content (str): The extracted content of the fake news.

    Note:
        The function assumes that the input string follows the expected format.
        If the format is not as expected, it may return unexpected results.
    """
    # Extract the title between '# Title: ' and the end of that line
    title_start_index = fake_news.find("# Title: ") + len("# Title: ")
    title_end_index = fake_news.find("\n", title_start_index)
    title = fake_news[title_start_index:title_end_index].strip()

    # Extract everything after the '# Content:' marker (the generated text may
    # place the content on the same line or on the following line)
    content_marker = "# Content:"
    content_start_index = fake_news.find(content_marker) + len(content_marker)
    content = fake_news[content_start_index:].strip()

    return title, content
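
# Example (a sketch based on the format documented above):
#
#   extract_title_content("# Title: Some headline\n# Content: Some body text")
#   # -> ("Some headline", "Some body text")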

def generate_fake_image(model, title):
    # Build the image prompt from the title, if one was provided
    if len(title) > 0:
        IMAGE_PROMPT = f"Generate a random image about {title}"
    else:
        IMAGE_PROMPT = "Generate a random image"

    result = client.images.generate(
        model=model,  # the name of your DALL-E 3 deployment
        prompt=IMAGE_PROMPT,
        n=1
    )
    image_url = json.loads(result.model_dump_json())['data'][0]['url']
    return image_url
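
# Example usage (a sketch; "dall-e-3" stands in for the image deployment name
# configured on your Azure OpenAI resource):
#
#   image_url = generate_fake_image("dall-e-3", "Some headline")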

def replace_text(news_title, news_content, replace_df):
    """
    Replaces occurrences in the news title and content based on the provided DataFrame.

    Args:
        news_title: The news title text.
        news_content: The news content text.
        replace_df: A pandas DataFrame with two columns: "Find what:" and "Replace with:".

    Returns:
        A tuple of (news_title, news_content) after all replacements have been made.
    """
    for _, row in replace_df.iterrows():
        find_what = row["Find what:"]
        replace_with = row["Replace with:"]
        news_content = news_content.replace(find_what, replace_with)
        news_title = news_title.replace(find_what, replace_with)
    return news_title, news_content
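
# Example usage (a sketch; pandas is assumed to be available, since replace_df
# is expected to be a pandas DataFrame with the column names used above):
#
#   import pandas as pd
#   df = pd.DataFrame({"Find what:": ["cat"], "Replace with:": ["dog"]})
#   replace_text("A cat story", "The cat sat.", df)
#   # -> ("A dog story", "The dog sat.")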