File size: 3,978 Bytes
da7dbd0
38fd181
 
1ce1659
 
 
 
38fd181
 
 
1ce1659
 
38fd181
 
 
 
 
1ce1659
da7dbd0
1ce1659
38fd181
da7dbd0
 
38fd181
da7dbd0
 
 
1ce1659
da7dbd0
 
1ce1659
da7dbd0
 
1ce1659
da7dbd0
 
38fd181
1ce1659
38fd181
1ce1659
 
38fd181
 
 
 
 
 
 
1ce1659
da7dbd0
1ce1659
 
 
38fd181
 
da7dbd0
 
 
 
 
 
 
 
 
38fd181
 
 
da7dbd0
 
38fd181
da7dbd0
 
 
 
 
 
 
 
 
 
 
 
 
 
38fd181
 
 
 
da7dbd0
38fd181
da7dbd0
 
38fd181
da7dbd0
 
 
 
 
 
38fd181
da7dbd0
38fd181
da7dbd0
38fd181
da7dbd0
38fd181
 
1ce1659
 
 
 
 
 
38fd181
1ce1659
 
 
 
 
 
 
 
 
38fd181
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import json
import os

import openai
from dotenv import load_dotenv

# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()
# Azure OpenAI connection settings; each is None if the variable is unset.
AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION")

# Module-level Azure OpenAI client shared by all generation helpers below.
client = openai.AzureOpenAI(
    api_version=AZURE_OPENAI_API_VERSION,
    api_key=AZURE_OPENAI_API_KEY,
    azure_endpoint=AZURE_OPENAI_ENDPOINT,
)


def generate_fake_text(text_generation_model, title, content):
    """Generate a fake news title and content with an Azure OpenAI chat model.

    Builds a prompt that optionally embeds the provided real title and/or
    content as context, sends it as a system message, and parses the reply.

    Args:
        text_generation_model (str): Deployment name of the chat model.
        title (str): Real news title to base the fake on; may be empty.
        content (str): Real news content to base the fake on; may be empty.

    Returns:
        tuple: (fake_title, fake_content) extracted from the model's reply,
            or ("", "") if the API call fails.
    """
    prompt = """Generate a random fake news title in this format:
    ---
    # Title: [Fake Title]
    # Content:
    [Fake Content]
    ---
    """
    # BUG FIX: the context placeholders were written as literal
    # "{news_title}"/"{news_content}" text and never formatted, so the
    # caller's title/content were never actually inserted into the prompt.
    if title and content:
        prompt += (
            "based on the following context:\n"
            f"        # Title: {title}:\n# Content: {content}"
        )
    elif title:
        prompt += f"based on the following context:\n        # Title: {title}:\n"
    elif content:
        prompt += f"based on the following context:\n        # Content: {content}"

    # Default both outputs so the function returns ("", "") on failure
    # instead of raising UnboundLocalError.
    fake_title = ""
    fake_content = ""

    try:
        response = client.chat.completions.create(
            model=text_generation_model,
            messages=[{"role": "system", "content": prompt}],
        )
        fake_text = response.choices[0].message.content
        print("Response from OpenAI API: ", fake_text)

    except openai.OpenAIError as e:
        # Best-effort: log and fall through with empty results.
        print(f"Error interacting with OpenAI API: {e}")
        fake_text = ""

    if fake_text != "":
        fake_title, fake_content = extract_title_content(fake_text)
    return fake_title, fake_content


def extract_title_content(fake_news):
    """Split a generated fake-news string into its title and content.

    The input is expected to contain a '# Title: ' line followed later by a
    '# Content: ' section, as produced by the text-generation prompt.

    Args:
        fake_news (str): The raw model output containing title and content.

    Returns:
        tuple: (title, content), both stripped of surrounding whitespace.

    Note:
        Assumes the expected markers are present; malformed input may yield
        unexpected slices.
    """
    title_tag = "# Title: "
    content_tag = "\n# Content: "

    # Title runs from just after its marker to the end of that line.
    after_title = fake_news.find(title_tag) + len(title_tag)
    newline_pos = fake_news.find("\n", after_title)
    title = fake_news[after_title:newline_pos].strip()

    # Everything after the content marker belongs to the content.
    after_content = fake_news.find(content_tag) + len(content_tag)
    content = fake_news[after_content:].strip()

    return title, content


def generate_fake_image(model, title):
    """Generate a fake news illustration with an Azure OpenAI image model.

    Args:
        model (str): Deployment name of the image-generation model
            (e.g. a DALL-E 3 deployment).
        title (str): News title used to steer the image; may be empty.

    Returns:
        str: URL of the single generated image.
    """
    # Anchor the image on the title when one is available.
    if len(title) > 0:
        image_prompt = f"Generate a random image about {title}"
    else:
        image_prompt = "Generate a random image"

    # BUG FIX: the `model` argument was previously ignored in favour of a
    # hard-coded "dall-e-3" deployment name; honour the caller's choice.
    result = client.images.generate(
        model=model,
        prompt=image_prompt,
        n=1,
    )
    # Access the URL directly instead of round-tripping through JSON.
    return result.data[0].url


def replace_text(news_title, news_content, replace_df):
    """Apply find/replace rules from a DataFrame to a title and its content.

    Each row of ``replace_df`` defines one literal substitution; rows are
    applied in order, so later rules see the output of earlier ones.

    Args:
        news_title (str): The news title to transform.
        news_content (str): The news content/body to transform.
        replace_df: DataFrame with columns "Find what:" and "Replace with:".

    Returns:
        tuple: (news_title, news_content) after all replacements.
    """
    for _, row in replace_df.iterrows():
        find_what = row["Find what:"]
        replace_with = row["Replace with:"]
        news_content = news_content.replace(find_what, replace_with)
        news_title = news_title.replace(find_what, replace_with)
    return news_title, news_content