import gradio as gr
import requests
from bs4 import BeautifulSoup
import re
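
# The Selenium-based fetch below is disabled (wrapped in a string literal) and unused.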
'''
from selenium import webdriver
from bs4 import BeautifulSoup

url = 'https://www.wikipedia.org/'
driver = webdriver.Chrome()
driver.get(url)

html_page = driver.page_source
soup = BeautifulSoup(html_page, 'html.parser')
title = soup.title.string

print(title)

driver.quit()'''
def search_fn(query,count):
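    """Scrape a Google results page for `query`, keep only result links that allow
    framing (no X-Frame-Options header), and return them as an HTML grid of
    iframe preview cards."""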
    count = int(count)
    if count > 40:
        count = 40
    # requests encodes the query string safely via params
    page = requests.get("https://www.google.com/search", params={"q": query, "num": count})
    soup = BeautifulSoup(page.content, "html.parser")
    # Write each embeddable result as an iframe card into a scratch HTML file
    file = open("myfile.txt", "w")

    # Google wraps result links as /url?q=<target>&sa=...; pull out the target URL
    for link in soup.find_all("a", href=re.compile(r"(?<=/url\?q=)(htt.*://.*)")):
        out = re.split(r":(?=http)", link["href"].replace("/url?q=", "").split("&sa", 1)[0])[0]
        # Only pages that do not send an X-Frame-Options header can be embedded in an iframe
        try:
            rr = requests.get(out, timeout=10)
        except requests.RequestException:
            continue
        x_opt = rr.headers.get("x-frame-options")
        if x_opt is None:
            frame_l = f'<div class="container-mee"><div class="put-on-top"><a target="_blank" href="{out}">{out}</a></div><iframe class="responsive-iframe-mee" src="{out}" frameborder="3"></iframe></div>'
            file.write(frame_l)
      
        # Debug output: the candidate URL and its X-Frame-Options header
        print(out)
        print(x_opt)

    file.close()
        
    with open("myfile.txt", "r") as file1:
        html_out = file1.read()
    out = format_t(html_out)  
    return out
def details_fn(query):
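    """Fetch `query` as a URL and return a title/description header plus an HTML grid of the page's images."""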

    page = requests.get(query)
    soup = BeautifulSoup(page.content, 'html.parser')
    try:
        title = soup.title.string
    except Exception as e:
        title = query
    try:
        description = soup.find('meta', attrs={'name':'description'})
        description = description['content']
    except Exception as e:
        description = title
    out = f"""
    <center><h3>{title}</h3><br>{description}</center>"""
    try:
        image_out = ""
        images = soup.find_all('img')
        for img in images:
            src = img.get('src')
            if not src:
                continue
            image_out += f"""
            <div>
            <img src="{src}">
            </div>
            """
            print(src)
        format_out = format_t(f'<div class="container-mee">{image_out}</div>')
    except Exception as e:
        format_out = "None"
        print(e)
    return out, format_out

def first():
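    """Placeholder heading shown while the iframe preview loads."""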
    out = '<h1>Loading</h1>'
    return out

def test(out):
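    """Render a single URL as an iframe preview card."""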
    return format_t(f'<div class="container-mee"><div class="put-on-top"><a target="_blank" href="{out}">{out}</a></div><iframe class="responsive-iframe-mee" src="{out}" frameborder="3"></iframe></div>')

def format_t(inp):
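    """Wrap an HTML fragment in a standalone page carrying the card/iframe CSS."""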

    style = '''
    .put-on-top{
    text-align: center;
    border-style: solid;
    border-width: 3px;
    border-radius: 5px;
    background: none;
    padding: 0.5em;
    margin-top:1em;
    margin-bottom:0.3em;
    }
    .grid-mee {
	display: flex;
	flex-direction: row;
	flex-wrap: wrap;
	justify-content: space-evenly;
	align-items: stretch;
	align-content: space-evenly;
}
    
    .container-mee {
  
  position: relative;
  overflow: hidden;
  width: 48%;
  height: 60em;
  margin-top:1em;
}
/* Then style the iframe to fit in the container div with full height and width */
.responsive-iframe-mee {
  position: relative;
  top: 0;
  left: 0;
  bottom: 0;
  right: 0;
  width: 100%;
  height: 100%;
    
}
  '''
    out = f'''<!DOCTYPE html>
<html lang="en">
  <head>
  <style>
  {style}
  </style>
  </head>
  <body>
  <div class="grid-mee">
    {inp}
  </div>
  </body>
</html>'''
    return out
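
# Gradio UI: a search row that feeds search_fn, and a URL row that drives the preview/details pipeline.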
with gr.Blocks() as app:
    gr.HTML("""<h1>Interactive Social Media Card Maker</h1>""")
    gr.HTML("""<h3><b>Step 1:</b> Enter a URL with Iframe capability</h3>""")
    with gr.Row():
        search_box = gr.Textbox(label="Enter a search topic here to find URLs", scale=2)
        num_return = gr.Number(label="Number of URLs to return", value=20, scale=1)
        search_btn = gr.Button(value="Search", scale=1)
    with gr.Row():
        url_input = gr.Textbox(label="URL")
        btn = gr.Button("Preview")
    details = gr.HTML("""""")
    output = gr.HTML("""""")
    images = gr.HTML("""""")


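    # Wire events: Search fills the output grid; Preview first shows a loading
    # header, then the iframe card, then the page details and image grid.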
    search_btn.click(search_fn, [search_box, num_return], output)
    btn.click(first, None, output).then(test, url_input, output).then(details_fn, url_input, [details, images])
app.launch()