index (int64, 0-10k) | blob_id (stringlengths 40-40) | step-1 (stringlengths 13-984k) | step-2 (stringlengths 6-1.23M, ⌀) | step-3 (stringlengths 15-1.34M, ⌀) | step-4 (stringlengths 30-1.34M, ⌀) | step-5 (stringlengths 64-1.2M, ⌀) | step-ids (sequencelengths 1-5) |
---|---|---|---|---|---|---|---|
1,200 | c4f437e6f5aaeccb6dd0948c3ed1f1d465bb29ce | <mask token>
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print('Listening......')
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa', '')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play', '')
talk('playing ' + song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is ' + time)
print(time)
<mask token>
| <mask token>
engine.setProperty('voice', voices[10].id)
<mask token>
engine.setProperty('rate', 150)
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print('Listening......')
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa', '')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play', '')
talk('playing ' + song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is ' + time)
print(time)
run_alexa()
| <mask token>
listner = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[10].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', 150)
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print('Listening......')
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa', '')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play', '')
talk('playing ' + song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is ' + time)
print(time)
run_alexa()
| import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
listner = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[10].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', 150)
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print('Listening......')
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa', '')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play', '')
talk('playing ' + song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is ' + time)
print(time)
run_alexa()
| import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
listner = sr.Recognizer()
engine = pyttsx3.init()
#change voices
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[10].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', 150)
#for machine to say
def talk(text):
engine.say(text)
engine.runAndWait()
def takeCommand():
try:
with sr.Microphone() as sc:
print("Listening......")
vc = listner.listen(sc)
cmd = listner.recognize_google(vc)
cmd = cmd.lower()
if 'alexa' in cmd:
cmd = cmd.replace('alexa','')
except:
pass
return cmd
def run_alexa():
command = takeCommand()
print(command)
if 'play' in command:
song = command.replace('play','')
talk('playing '+song)
pywhatkit.playonyt(song)
if 'time' in command:
time = datetime.datetime.now().strftime('%I:%M %p')
talk('time is '+time)
print(time)
run_alexa() | [
3,
4,
5,
6,
7
] |
1,201 | 4ed730369cf065936569a8515de44042829c2143 | <mask token>
def writeUniquerecords(dirpath, filenames):
sourcepath = os.path.join(dirpath, filenames)
with open(sourcepath, 'r') as fp:
lines = fp.readlines()
destination_lines = []
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile = (
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
)
destipath = os.path.join(destinationfile, filenames)
with open(destipath, 'w+') as destination:
destination.write('\n'.join(destination_lines))
<mask token>
| <mask token>
def writeUniquerecords(dirpath, filenames):
sourcepath = os.path.join(dirpath, filenames)
with open(sourcepath, 'r') as fp:
lines = fp.readlines()
destination_lines = []
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile = (
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
)
destipath = os.path.join(destinationfile, filenames)
with open(destipath, 'w+') as destination:
destination.write('\n'.join(destination_lines))
def Readandwrite():
for dirpath, dirnames, filenames in os.walk(
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'
):
print('Current Path', dirpath)
print('Current Folder names', dirnames)
print('Current Files names ', filenames)
for file in filenames:
writeUniquerecords(dirpath, file)
<mask token>
| <mask token>
def writeUniquerecords(dirpath, filenames):
sourcepath = os.path.join(dirpath, filenames)
with open(sourcepath, 'r') as fp:
lines = fp.readlines()
destination_lines = []
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile = (
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
)
destipath = os.path.join(destinationfile, filenames)
with open(destipath, 'w+') as destination:
destination.write('\n'.join(destination_lines))
def Readandwrite():
for dirpath, dirnames, filenames in os.walk(
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'
):
print('Current Path', dirpath)
print('Current Folder names', dirnames)
print('Current Files names ', filenames)
for file in filenames:
writeUniquerecords(dirpath, file)
Readandwrite()
| import os
from test.test_unicode_file_functions import filenames
def writeUniquerecords(dirpath, filenames):
sourcepath = os.path.join(dirpath, filenames)
with open(sourcepath, 'r') as fp:
lines = fp.readlines()
destination_lines = []
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile = (
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
)
destipath = os.path.join(destinationfile, filenames)
with open(destipath, 'w+') as destination:
destination.write('\n'.join(destination_lines))
def Readandwrite():
for dirpath, dirnames, filenames in os.walk(
'/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'
):
print('Current Path', dirpath)
print('Current Folder names', dirnames)
print('Current Files names ', filenames)
for file in filenames:
writeUniquerecords(dirpath, file)
Readandwrite()
| import os
from test.test_unicode_file_functions import filenames
def writeUniquerecords(dirpath,filenames):
sourcepath=os.path.join(dirpath,filenames)
with open(sourcepath,'r') as fp:
lines= fp.readlines()
destination_lines=[]
for line in lines:
if line not in destination_lines:
destination_lines.append(line)
destinationfile='/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/UpdatedFolder'
destipath=os.path.join(destinationfile,filenames)
with open(destipath, "w+")as destination:
destination.write("\n".join(destination_lines))
def Readandwrite():
for dirpath,dirnames,filenames in os.walk('/Users/vijayakarthikeyanarul/git/python_Skills/com/filehandling/locators'):
print('Current Path',dirpath)
print('Current Folder names',dirnames)
print('Current Files names ',filenames)
for file in filenames:
writeUniquerecords(dirpath,file)
Readandwrite() | [
1,
2,
3,
4,
5
] |
1,202 | b65d25198d55ab4a859b9718b7b225fa92c13a2b | <mask token>
| <mask token>
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
<mask token>
| <mask token>
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
| from whylogs.core.annotation_profiling import Rectangle
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':
'test'}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
| from whylogs.core.annotation_profiling import Rectangle
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{"name": "test"}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
| [
0,
1,
2,
3,
4
] |
1,203 | e78c4f65d84d5b33debb415005e22f926e14d7d4 | <mask token>
class Vintage:
<mask token>
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
<mask token>
<mask token>
<mask token>
<mask token>
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<mask token>
| <mask token>
class Vintage:
<mask token>
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<mask token>
| <mask token>
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<mask token>
| <mask token>
def get_dataframes(csvfile, spec=SPEC):
"""Extract dataframes from *csvfile* using *spec* parsing instructions.
Args:
csvfile (file connection or StringIO) - CSV file for parsing
spec (spec.Specification) - pasing instructions, defaults to spec.SPEC
Returns:
Three pandas dataframes at annual, qtr and monthly frequencies
in a dictionary.
"""
tables = [t for csv_segment, pdef in Reader(csvfile, spec).items() for
t in extract_tables(csv_segment, pdef)]
emitter = Emitter(tables)
return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print('Saved dataframe to', path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print('Test values parsed OK for', self)
return True
def __repr__(self):
return 'Vintage({}, {})'.format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print('Checking', year, month)
vintage = Vintage(year, month)
vintage.validate()
<mask token>
| """Get pandas dataframes for a given data and month.
*get_dataframes(csvfile, spec=SPEC)* is a function to get dataframes
from *csvfile* connection under *spec* parsing instruction.
*Vintage* class addresses dataset by year and month:
Vintage(year, month).save()
Vintage(year, month).validate()
*Collection* manipulates all datasets, released at various dates:
Collection.save_all()
Collection.save_latest()
Collection.approve_latest()
Collection.approve_all()
"""
from config import LocalCSV, LATEST_DATE, SUPPORTED_DATES
from csv2df.specification import SPEC
from csv2df.reader import Reader, open_csv
from csv2df.parser import extract_tables
from csv2df.emitter import Emitter
from csv2df.validator import Validator
__all__ = ['get_dataframes', 'Vintage', 'Collection']
FREQUENCIES = ['a', 'q', 'm']
def get_dataframes(csvfile, spec=SPEC):
"""Extract dataframes from *csvfile* using *spec* parsing instructions.
Args:
csvfile (file connection or StringIO) - CSV file for parsing
spec (spec.Specification) - pasing instructions, defaults to spec.SPEC
Returns:
Three pandas dataframes at annual, qtr and monthly frequencies
in a dictionary.
"""
tables = [t for csv_segment, pdef in Reader(csvfile, spec).items()
for t in extract_tables(csv_segment, pdef)]
emitter = Emitter(tables)
return {freq: emitter.get_dataframe(freq) for freq in FREQUENCIES}
class Vintage:
"""Represents dataset release for a given year and month."""
def __init__(self, year, month):
self.year, self.month = year, month
self.csv = LocalCSV(year, month)
@property
def dfs(self):
with open_csv(self.csv.interim) as csvfile:
return get_dataframes(csvfile)
def save(self):
for freq, df in self.dfs.items():
path = self.csv.processed(freq)
df.to_csv(path)
print("Saved dataframe to", path)
return True
def validate(self):
checker = Validator(*[self.dfs[freq] for freq in FREQUENCIES])
checker.run()
print("Test values parsed OK for", self)
return True
def __repr__(self):
return "Vintage({}, {})".format(self.year, self.month)
class Collection:
"""Methods to manipulate entire set of data releases."""
all_dates = SUPPORTED_DATES
latest_vintage = Vintage(*LATEST_DATE)
@classmethod
def save_latest(cls):
cls.latest_vintage.save()
@classmethod
def approve_latest(cls):
"""Quick check for algorithm on latest available data."""
cls.latest_vintage.validate()
@classmethod
def save_all(cls):
for year, month in cls.all_dates:
Vintage(year, month).save()
@classmethod
def approve_all(cls):
"""Checks all dates, runs for about 1-2 min of a fast computer.
May fail if dataset not complete, eg word2csv written only part
of CSV file.
"""
for year, month in cls.all_dates:
print("Checking", year, month)
vintage = Vintage(year, month)
vintage.validate()
if __name__ == "__main__":
# Collection calls
# Collection.approve_latest()
# Collection.approve_all()
# Collection.save_latest()
# Collection.save_all()
# sample Vintage call
year, month = 2015, 5
vint = Vintage(year, month)
vint.validate()
#dfa, dfq, dfm = vint.dfs()
| [
9,
13,
14,
15,
19
] |
1,204 | 5ab877ef15cdcd52463b1567c28327dc2eeea2de | <mask token>
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
<mask token>
| <mask token>
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = By.ID, id
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.
presence_of_element_located(locator))
print(u'成功访问搜索引擎!')
except Exception as e:
print(e)
print(u'搜索引擎未加载成功,浏览器将被退出!')
driver.quit()
| <mask token>
SELENIUM_TIMEOUT = 12
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = By.ID, id
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.
presence_of_element_located(locator))
print(u'成功访问搜索引擎!')
except Exception as e:
print(e)
print(u'搜索引擎未加载成功,浏览器将被退出!')
driver.quit()
| from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
SELENIUM_TIMEOUT = 12
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = By.ID, id
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.
presence_of_element_located(locator))
print(u'成功访问搜索引擎!')
except Exception as e:
print(e)
print(u'搜索引擎未加载成功,浏览器将被退出!')
driver.quit()
| from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
SELENIUM_TIMEOUT = 12
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
# PhantomJS 设置不加载图片
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
# chrome 设置不加载图片
chrome_options = webdriver.ChromeOptions()
chrome_profile = {"profile.managed_default_content_settings.images": 2}
chrome_options.add_experimental_option("prefs", chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = (By.ID, id)
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.presence_of_element_located(locator))
print(u"成功访问搜索引擎!")
except Exception as e:
print(e)
print(u"搜索引擎未加载成功,浏览器将被退出!")
driver.quit() | [
1,
2,
3,
4,
5
] |
1,205 | a21942a835f7b2ea70e9dd7b26285ea2dd411750 | class person(object):
<mask token>
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def getpopulation(cls):
return cls.population
@staticmethod
def isadult(age=17):
return age >= 18
def display(self):
print(self.name, 'is', self.age, 'years old')
<mask token>
| class person(object):
population = 50
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def getpopulation(cls):
return cls.population
@staticmethod
def isadult(age=17):
return age >= 18
def display(self):
print(self.name, 'is', self.age, 'years old')
<mask token>
| class person(object):
population = 50
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def getpopulation(cls):
return cls.population
@staticmethod
def isadult(age=17):
return age >= 18
def display(self):
print(self.name, 'is', self.age, 'years old')
<mask token>
print(new.getpopulation())
print(new.isadult(12))
print(new.display())
| class person(object):
population = 50
def __init__(self, name, age):
self.name = name
self.age = age
@classmethod
def getpopulation(cls):
return cls.population
@staticmethod
def isadult(age=17):
return age >= 18
def display(self):
print(self.name, 'is', self.age, 'years old')
new = person('Santo', 22)
print(new.getpopulation())
print(new.isadult(12))
print(new.display())
| null | [
5,
6,
7,
8
] |
1,206 | 50ae47c88bbc0f281ef75784377fb65192e257b0 | <mask token>
class DLT(object):
<mask token>
def getimg(self, idx):
images = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
return cv2.imread(images[idx])
<mask token>
def estimatePoseDLT(self, p, P, idx):
"""
DLT algorithm. Refer to http://www.kwon3d.com/theory/dlt/dlt.html for in-depth analysis
Solves for projection matrix M = [R|t], given the n 2D-3D points corresponding to p_i and P_i
***Note: Matrix Q is built using the /normalized/ coordinates of p_i
SVD returns V already transposed
Args:
p = given 2D coordinates (u,v) of the projections of the referenced 3D points in the undistorted image
P = given position coordinates of the n reference 3D points given in the world coordinates
K = given camera matrix
Returns:
M = The solved projection matrix (for normalized coordinates)
"""
for col_idx in range(0, P.shape[1]):
if col_idx == 0:
Q = np.array([[P[0, col_idx], P[1, col_idx], P[2, col_idx],
1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx], -p[0,
col_idx] * P[1, col_idx], -p[0, col_idx] * P[2, col_idx
], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx], P[1,
col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
else:
currQ = np.array([[P[0, col_idx], P[1, col_idx], P[2,
col_idx], 1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx],
-p[0, col_idx] * P[1, col_idx], -p[0, col_idx] * P[2,
col_idx], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx],
P[1, col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
Q = np.vstack((Q, currQ)).astype(np.float32)
U, S, V = np.linalg.svd(Q, full_matrices=True)
M = V[-1:]
M = M.reshape((3, 4))
if np.linalg.det(M[:, :3]) < 0:
M = -M
"""
Orthogonal Procrustes problem:
Did not impose any constraints on R from M = [R|t] is actually a rotation matrix;
Need to compute matrix R_tilde, the matrix closest to the true "R" in the sense of Frobenius norm
"""
R = M[:, :3]
U, S, V = np.linalg.svd(R)
R_tilde = U @ V
alpha = np.linalg.norm(R_tilde, ord='fro') / np.linalg.norm(R, ord=
'fro')
M = np.hstack((R_tilde, alpha * M[:, -1].reshape((3, 1))))
return M
def reprojectPoints(self, P, M):
"""
Reprojects the 3D points P_i in the current image using the estimated projection matrix M
and camera matrix K. Use this to show on image to double check that reprojected points p_i' fall close to
points p_i.
Args:
P = referenced 3D world coordinates
M = Projection matrix solved from estimatePoseDLT
org_image = the original image, needed to project points onto
Returns:
reprojected_pts = self-explanatory
"""
homo_mtx = (K @ M @ P).T
homo_mtx[:, 0] = homo_mtx[:, 0] / homo_mtx[:, 2]
homo_mtx[:, 1] = homo_mtx[:, 1] / homo_mtx[:, 2]
reprojected_pts = homo_mtx[:, :2]
return reprojected_pts
<mask token>
| <mask token>
class DLT(object):
def __init__(self, K, detected_corners, Pw_corners, reproject_points=False
):
self.K = K
self.p = detected_corners
self.Pw = Pw_corners
self.reproject_points = reproject_points
def getimg(self, idx):
images = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
return cv2.imread(images[idx])
def currFrame(self, frame_idx):
u = self.p[frame_idx][0:-1:2]
v = self.p[frame_idx][1::2]
p = np.linalg.inv(self.K) @ np.vstack((u, v, np.ones(u.shape[0])))
P = np.vstack((self.Pw.T, np.ones(self.Pw.shape[0])))
return p, P
def estimatePoseDLT(self, p, P, idx):
"""
DLT algorithm. Refer to http://www.kwon3d.com/theory/dlt/dlt.html for in-depth analysis
Solves for projection matrix M = [R|t], given the n 2D-3D points corresponding to p_i and P_i
***Note: Matrix Q is built using the /normalized/ coordinates of p_i
SVD returns V already transposed
Args:
p = given 2D coordinates (u,v) of the projections of the referenced 3D points in the undistorted image
P = given position coordinates of the n reference 3D points given in the world coordinates
K = given camera matrix
Returns:
M = The solved projection matrix (for normalized coordinates)
"""
for col_idx in range(0, P.shape[1]):
if col_idx == 0:
Q = np.array([[P[0, col_idx], P[1, col_idx], P[2, col_idx],
1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx], -p[0,
col_idx] * P[1, col_idx], -p[0, col_idx] * P[2, col_idx
], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx], P[1,
col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
else:
currQ = np.array([[P[0, col_idx], P[1, col_idx], P[2,
col_idx], 1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx],
-p[0, col_idx] * P[1, col_idx], -p[0, col_idx] * P[2,
col_idx], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx],
P[1, col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
Q = np.vstack((Q, currQ)).astype(np.float32)
U, S, V = np.linalg.svd(Q, full_matrices=True)
M = V[-1:]
M = M.reshape((3, 4))
if np.linalg.det(M[:, :3]) < 0:
M = -M
"""
Orthogonal Procrustes problem:
Did not impose any constraints on R from M = [R|t] is actually a rotation matrix;
Need to compute matrix R_tilde, the matrix closest to the true "R" in the sense of Frobenius norm
"""
R = M[:, :3]
U, S, V = np.linalg.svd(R)
R_tilde = U @ V
alpha = np.linalg.norm(R_tilde, ord='fro') / np.linalg.norm(R, ord=
'fro')
M = np.hstack((R_tilde, alpha * M[:, -1].reshape((3, 1))))
return M
def reprojectPoints(self, P, M):
"""
Reprojects the 3D points P_i in the current image using the estimated projection matrix M
and camera matrix K. Use this to show on image to double check that reprojected points p_i' fall close to
points p_i.
Args:
P = referenced 3D world coordinates
M = Projection matrix solved from estimatePoseDLT
org_image = the original image, needed to project points onto
Returns:
reprojected_pts = self-explanatory
"""
homo_mtx = (K @ M @ P).T
homo_mtx[:, 0] = homo_mtx[:, 0] / homo_mtx[:, 2]
homo_mtx[:, 1] = homo_mtx[:, 1] / homo_mtx[:, 2]
reprojected_pts = homo_mtx[:, :2]
return reprojected_pts
<mask token>
| <mask token>
class DLT(object):
def __init__(self, K, detected_corners, Pw_corners, reproject_points=False
):
self.K = K
self.p = detected_corners
self.Pw = Pw_corners
self.reproject_points = reproject_points
def getimg(self, idx):
images = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
return cv2.imread(images[idx])
def currFrame(self, frame_idx):
u = self.p[frame_idx][0:-1:2]
v = self.p[frame_idx][1::2]
p = np.linalg.inv(self.K) @ np.vstack((u, v, np.ones(u.shape[0])))
P = np.vstack((self.Pw.T, np.ones(self.Pw.shape[0])))
return p, P
def estimatePoseDLT(self, p, P, idx):
"""
DLT algorithm. Refer to http://www.kwon3d.com/theory/dlt/dlt.html for in-depth analysis
Solves for projection matrix M = [R|t], given the n 2D-3D points corresponding to p_i and P_i
***Note: Matrix Q is built using the /normalized/ coordinates of p_i
SVD returns V already transposed
Args:
p = given 2D coordinates (u,v) of the projections of the referenced 3D points in the undistorted image
P = given position coordinates of the n reference 3D points given in the world coordinates
K = given camera matrix
Returns:
M = The solved projection matrix (for normalized coordinates)
"""
for col_idx in range(0, P.shape[1]):
if col_idx == 0:
Q = np.array([[P[0, col_idx], P[1, col_idx], P[2, col_idx],
1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx], -p[0,
col_idx] * P[1, col_idx], -p[0, col_idx] * P[2, col_idx
], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx], P[1,
col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
else:
currQ = np.array([[P[0, col_idx], P[1, col_idx], P[2,
col_idx], 1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx],
-p[0, col_idx] * P[1, col_idx], -p[0, col_idx] * P[2,
col_idx], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx],
P[1, col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
Q = np.vstack((Q, currQ)).astype(np.float32)
U, S, V = np.linalg.svd(Q, full_matrices=True)
M = V[-1:]
M = M.reshape((3, 4))
if np.linalg.det(M[:, :3]) < 0:
M = -M
"""
Orthogonal Procrustes problem:
Did not impose any constraints on R from M = [R|t] is actually a rotation matrix;
Need to compute matrix R_tilde, the matrix closest to the true "R" in the sense of Frobenius norm
"""
R = M[:, :3]
U, S, V = np.linalg.svd(R)
R_tilde = U @ V
alpha = np.linalg.norm(R_tilde, ord='fro') / np.linalg.norm(R, ord=
'fro')
M = np.hstack((R_tilde, alpha * M[:, -1].reshape((3, 1))))
return M
def reprojectPoints(self, P, M):
"""
Reprojects the 3D points P_i in the current image using the estimated projection matrix M
and camera matrix K. Use this to show on image to double check that reprojected points p_i' fall close to
points p_i.
Args:
P = referenced 3D world coordinates
M = Projection matrix solved from estimatePoseDLT
org_image = the original image, needed to project points onto
Returns:
reprojected_pts = self-explanatory
"""
homo_mtx = (K @ M @ P).T
homo_mtx[:, 0] = homo_mtx[:, 0] / homo_mtx[:, 2]
homo_mtx[:, 1] = homo_mtx[:, 1] / homo_mtx[:, 2]
reprojected_pts = homo_mtx[:, :2]
return reprojected_pts
def plotTrajectory3D(M, fig, output_filename='motion.avi'):
R = M[:, :3].T
t = M[:, -1]
rotMat = Rotation.from_matrix(R)
quat = rotMat.as_quat()
quat = np.roll(quat, 1)
transl = -R @ t
plt.clf()
ax = plt.axes(projection='3d')
camera = Camera(fig)
ax.set(xlim=(-0.1, 0.4), ylim=(-0.2, 0.3), zlim=(-0.3, 0))
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.scatter(-Pw_corners[:, 2], Pw_corners[:, 0], -Pw_corners[:, 1])
r = Rectangle((0, -0.22), width=0.105, height=0.14, color='blue', fill=
False, hatch='/')
ax.add_patch(r)
art3d.pathpatch_2d_to_3d(r, z=0, zdir='x')
r1 = Rectangle((0.11, -0.25), width=0.13, height=0.1, color='red', fill
=False, hatch='/')
ax.add_patch(r1)
art3d.pathpatch_2d_to_3d(r1, z=0.2, zdir='y')
r2 = Rectangle((0.11, 0), width=0.13, height=0.11, color='green', fill=
False, hatch='/')
ax.add_patch(r2)
art3d.pathpatch_2d_to_3d(r2, z=-0.265, zdir='z')
rotMat = rotMat.as_matrix()
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 0], rotMat[0, 0
], -rotMat[1, 0], color='red', length=0.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 1], rotMat[0, 1
], -rotMat[1, 1], color='green', length=0.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 2], rotMat[1, 2
], -rotMat[1, 2], color='blue', length=0.1)
camera.snap()
<mask token>
| <mask token>
class DLT(object):
def __init__(self, K, detected_corners, Pw_corners, reproject_points=False
):
self.K = K
self.p = detected_corners
self.Pw = Pw_corners
self.reproject_points = reproject_points
def getimg(self, idx):
images = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
return cv2.imread(images[idx])
def currFrame(self, frame_idx):
u = self.p[frame_idx][0:-1:2]
v = self.p[frame_idx][1::2]
p = np.linalg.inv(self.K) @ np.vstack((u, v, np.ones(u.shape[0])))
P = np.vstack((self.Pw.T, np.ones(self.Pw.shape[0])))
return p, P
def estimatePoseDLT(self, p, P, idx):
"""
DLT algorithm. Refer to http://www.kwon3d.com/theory/dlt/dlt.html for in-depth analysis
Solves for projection matrix M = [R|t], given the n 2D-3D points corresponding to p_i and P_i
***Note: Matrix Q is built using the /normalized/ coordinates of p_i
SVD returns V already transposed
Args:
p = given 2D coordinates (u,v) of the projections of the referenced 3D points in the undistorted image
P = given position coordinates of the n reference 3D points given in the world coordinates
K = given camera matrix
Returns:
M = The solved projection matrix (for normalized coordinates)
"""
for col_idx in range(0, P.shape[1]):
if col_idx == 0:
Q = np.array([[P[0, col_idx], P[1, col_idx], P[2, col_idx],
1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx], -p[0,
col_idx] * P[1, col_idx], -p[0, col_idx] * P[2, col_idx
], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx], P[1,
col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
else:
currQ = np.array([[P[0, col_idx], P[1, col_idx], P[2,
col_idx], 1, 0, 0, 0, 0, -p[0, col_idx] * P[0, col_idx],
-p[0, col_idx] * P[1, col_idx], -p[0, col_idx] * P[2,
col_idx], -p[0, col_idx]], [0, 0, 0, 0, P[0, col_idx],
P[1, col_idx], P[2, col_idx], 1, -p[1, col_idx] * P[0,
col_idx], -p[1, col_idx] * P[1, col_idx], -p[1, col_idx
] * P[2, col_idx], -p[1, col_idx]]])
Q = np.vstack((Q, currQ)).astype(np.float32)
U, S, V = np.linalg.svd(Q, full_matrices=True)
M = V[-1:]
M = M.reshape((3, 4))
if np.linalg.det(M[:, :3]) < 0:
M = -M
"""
Orthogonal Procrustes problem:
Did not impose any constraints on R from M = [R|t] is actually a rotation matrix;
Need to compute matrix R_tilde, the matrix closest to the true "R" in the sense of Frobenius norm
"""
R = M[:, :3]
U, S, V = np.linalg.svd(R)
R_tilde = U @ V
alpha = np.linalg.norm(R_tilde, ord='fro') / np.linalg.norm(R, ord=
'fro')
M = np.hstack((R_tilde, alpha * M[:, -1].reshape((3, 1))))
return M
def reprojectPoints(self, P, M):
"""
Reprojects the 3D points P_i in the current image using the estimated projection matrix M
and camera matrix K. Use this to show on image to double check that reprojected points p_i' fall close to
points p_i.
Args:
P = referenced 3D world coordinates
M = Projection matrix solved from estimatePoseDLT
org_image = the original image, needed to project points onto
Returns:
reprojected_pts = self-explanatory
"""
homo_mtx = (K @ M @ P).T
homo_mtx[:, 0] = homo_mtx[:, 0] / homo_mtx[:, 2]
homo_mtx[:, 1] = homo_mtx[:, 1] / homo_mtx[:, 2]
reprojected_pts = homo_mtx[:, :2]
return reprojected_pts
def plotTrajectory3D(M, fig, output_filename='motion.avi'):
R = M[:, :3].T
t = M[:, -1]
rotMat = Rotation.from_matrix(R)
quat = rotMat.as_quat()
quat = np.roll(quat, 1)
transl = -R @ t
plt.clf()
ax = plt.axes(projection='3d')
camera = Camera(fig)
ax.set(xlim=(-0.1, 0.4), ylim=(-0.2, 0.3), zlim=(-0.3, 0))
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.scatter(-Pw_corners[:, 2], Pw_corners[:, 0], -Pw_corners[:, 1])
r = Rectangle((0, -0.22), width=0.105, height=0.14, color='blue', fill=
False, hatch='/')
ax.add_patch(r)
art3d.pathpatch_2d_to_3d(r, z=0, zdir='x')
r1 = Rectangle((0.11, -0.25), width=0.13, height=0.1, color='red', fill
=False, hatch='/')
ax.add_patch(r1)
art3d.pathpatch_2d_to_3d(r1, z=0.2, zdir='y')
r2 = Rectangle((0.11, 0), width=0.13, height=0.11, color='green', fill=
False, hatch='/')
ax.add_patch(r2)
art3d.pathpatch_2d_to_3d(r2, z=-0.265, zdir='z')
rotMat = rotMat.as_matrix()
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 0], rotMat[0, 0
], -rotMat[1, 0], color='red', length=0.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 1], rotMat[0, 1
], -rotMat[1, 1], color='green', length=0.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2, 2], rotMat[1, 2
], -rotMat[1, 2], color='blue', length=0.1)
camera.snap()
if __name__ == '__main__':
datadir = 'data/'
detected_corners = np.loadtxt(datadir + 'detected_corners.txt')
K = np.loadtxt(datadir + 'K.txt')
Pw_corners = 0.01 * np.loadtxt('data/p_W_corners.txt', delimiter=',')
file_list = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
num_images = len(file_list)
projection = DLT(K, detected_corners, Pw_corners, reproject_points=False)
fig = plt.figure()
for img_idx in range(0, num_images):
image = projection.getimg(img_idx)
p, P = projection.currFrame(img_idx)
M = projection.estimatePoseDLT(p, P, img_idx)
reprojected_pts = projection.reprojectPoints(P, M)
if projection.reproject_points:
for point in reprojected_pts:
estimate = point.astype(np.float32)
cv2.circle(image, tuple(estimate), radius=5, color=(0, 0,
255), thickness=2)
for u, v in zip(detected_corners[img_idx][0::2],
detected_corners[img_idx][1::2]):
u = u.astype(np.float32)
v = v.astype(np.float32)
cv2.circle(image, (u, v), radius=5, color=(0, 255, 0),
thickness=2)
cv2.imshow('img', image)
cv2.waitKey(34)
plotTrajectory3D(M, fig)
fname = 'my_results' + '/' + file_list[img_idx][28:28 + 4] + '.png'
plt.savefig(fname)
| import numpy as np
import cv2
import glob
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Rectangle
import celluloid
from celluloid import Camera # couldn't save animation with ArtisticAnimation, TO DO
# datadir = 'data/'
# detected_corners = np.loadtxt(datadir + 'detected_corners.txt') # pixel coords (u,v) of detected corners
# K = np.loadtxt(datadir + 'K.txt') # camera matrix
# Pw_corners = .01 * np.loadtxt('data/p_W_corners.txt', delimiter=',') # [12x3] world coords of detected corners in centimeters
class DLT(object):
def __init__(self, K, detected_corners, Pw_corners, reproject_points=False):
self.K = K
self.p = detected_corners
self.Pw = Pw_corners
self.reproject_points = reproject_points
def getimg(self, idx):
images = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
return cv2.imread(images[idx])
# def currFrame(detected_corners, K, Pw_corners, frame_idx):
def currFrame(self, frame_idx):
# get normalized coordinates [x;y;1]
u = self.p[frame_idx][0:-1:2]
v = self.p[frame_idx][1::2]
p = np.linalg.inv(self.K) @ np.vstack((u,v,np.ones(u.shape[0])))
# get 3d world coordinates [X; Y; Z; 1]
P = np.vstack((self.Pw.T, np.ones(self.Pw.shape[0])))
return p, P
def estimatePoseDLT(self, p, P,idx):
'''
DLT algorithm. Refer to http://www.kwon3d.com/theory/dlt/dlt.html for in-depth analysis
Solves for projection matrix M = [R|t], given the n 2D-3D points corresponding to p_i and P_i
***Note: Matrix Q is built using the /normalized/ coordinates of p_i
SVD returns V already transposed
Args:
p = given 2D coordinates (u,v) of the projections of the referenced 3D points in the undistorted image
P = given position coordinates of the n reference 3D points given in the world coordinates
K = given camera matrix
Returns:
M = The solved projection matrix (for normalized coordinates)
'''
# construct Q matrix
for col_idx in range(0, P.shape[1]):
if col_idx == 0:
Q = np.array([
[P[0,col_idx], P[1,col_idx], P[2,col_idx], 1, 0, 0, 0, 0, -p[0, col_idx]*P[0,col_idx], -p[0, col_idx]*P[1,col_idx], -p[0, col_idx]*P[2,col_idx], -p[0, col_idx]],
[0, 0, 0, 0, P[0,col_idx], P[1,col_idx], P[2,col_idx], 1, -p[1, col_idx]*P[0,col_idx], -p[1, col_idx]*P[1,col_idx], -p[1, col_idx]*P[2,col_idx], -p[1, col_idx]]
])
else:
currQ = np.array([
[P[0,col_idx], P[1,col_idx], P[2,col_idx], 1, 0, 0, 0, 0, -p[0, col_idx]*P[0,col_idx], -p[0, col_idx]*P[1,col_idx], -p[0, col_idx]*P[2,col_idx], -p[0, col_idx]],
[0, 0, 0, 0, P[0,col_idx], P[1,col_idx], P[2,col_idx], 1, -p[1, col_idx]*P[0,col_idx], -p[1, col_idx]*P[1,col_idx], -p[1, col_idx]*P[2,col_idx], -p[1, col_idx]]
])
Q = np.vstack((Q,currQ)).astype(np.float32)
U, S, V = np.linalg.svd(Q, full_matrices=True)
M = V[-1:]
M = M.reshape((3,4)) # reshape to true projection matrix
if np.linalg.det(M[:,:3]) < 0:
M = -M
'''
Orthogonal Procrustes problem:
Did not impose any constraints on R from M = [R|t] is actually a rotation matrix;
Need to compute matrix R_tilde, the matrix closest to the true "R" in the sense of Frobenius norm
'''
R = M[:,:3] # rotation matrix
U,S,V = np.linalg.svd(R)
R_tilde = U @ V
# M is not true M in this case, but alpha*M where alpha is the scale
alpha = np.linalg.norm(R_tilde, ord='fro') / np.linalg.norm(R, ord='fro')
M = np.hstack((R_tilde, alpha*M[:,-1].reshape((3,1))))
return M
def reprojectPoints(self, P, M):
'''
Reprojects the 3D points P_i in the current image using the estimated projection matrix M
and camera matrix K. Use this to show on image to double check that reprojected points p_i' fall close to
points p_i.
Args:
P = referenced 3D world coordinates
M = Projection matrix solved from estimatePoseDLT
org_image = the original image, needed to project points onto
Returns:
reprojected_pts = self-explanatory
'''
homo_mtx = (K @ M @ P).T
homo_mtx[:,0] = homo_mtx[:,0] / homo_mtx[:,2]
homo_mtx[:,1] = homo_mtx[:,1] / homo_mtx[:,2]
reprojected_pts = homo_mtx[:,:2]
# print(reprojected_pts)
return reprojected_pts
def plotTrajectory3D(M, fig, output_filename='motion.avi'):
R = M[:,:3].T
t = M[:,-1]
rotMat = Rotation.from_matrix(R) # Rotation object instance
quat = rotMat.as_quat()
quat = np.roll(quat, 1)
transl = -R @ t
# prelims
plt.clf()
# ax = fig.add_subplot(111, projection='3d')
ax = plt.axes(projection='3d')
camera = Camera(fig)
ax.set(xlim=(-.1,.4), ylim=(-.2,.3), zlim=(-.3, 0))
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.scatter(-Pw_corners[:,2], Pw_corners[:,0], -Pw_corners[:,1]) # draw given corners
# draw rectangles at corners
r = Rectangle((0, -.22), width=.105, height=.14, color='blue', fill=False, hatch='/')
ax.add_patch(r)
art3d.pathpatch_2d_to_3d(r, z=0, zdir='x')
r1 = Rectangle((.11,-.25), width=.13, height=.1, color='red', fill=False, hatch='/')
ax.add_patch(r1)
art3d.pathpatch_2d_to_3d(r1, z=.2, zdir='y')
r2 = Rectangle((.11, 0), width=.13, height=.11, color='green', fill=False, hatch='/')
ax.add_patch(r2)
art3d.pathpatch_2d_to_3d(r2, z=-.265, zdir='z')
# draw camera coordinate frame onto image
rotMat = rotMat.as_matrix()
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2,0], rotMat[0,0], -rotMat[1,0], color='red', length=.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2,1], rotMat[0,1], -rotMat[1,1], color='green', length=.1)
ax.quiver(-transl[2], transl[0], -transl[1], -rotMat[2,2], rotMat[1,2], -rotMat[1,2], color='blue', length=.1)
# print([-transl[2], transl[0], -transl[1], -rotMat[2,0], rotMat[0,0], -rotMat[1,0]])
camera.snap()
if __name__ == "__main__":
# Given info
datadir = 'data/'
detected_corners = np.loadtxt(datadir + 'detected_corners.txt') # pixel coords (u,v) of detected corners
K = np.loadtxt(datadir + 'K.txt') # camera matrix
Pw_corners = .01 * np.loadtxt('data/p_W_corners.txt', delimiter=',') # [12x3] world coords of detected corners in centimeters
# Iterate through each picture
file_list = sorted(glob.glob(datadir + 'images_undistorted/*.jpg'))
# num_images = len(glob.glob(datadir + 'images_undistorted/*.jpg'))
num_images = len(file_list)
projection = DLT(K, detected_corners, Pw_corners, reproject_points=False)
fig = plt.figure()
for img_idx in range(0, num_images):
image = projection.getimg(img_idx) # get current image in directory
p, P = projection.currFrame(img_idx) # get normalized 2D pixel points and 3D world points in correct format
M = projection.estimatePoseDLT(p, P, img_idx) # get projection matrix M = [R|t]
reprojected_pts = projection.reprojectPoints(P, M) # reproject P_i onto image
if projection.reproject_points:
# show reprojected points on image
for point in reprojected_pts:
estimate = point.astype(np.float32) # my estimated points
cv2.circle(image, tuple(estimate), radius=5, color=(0,0,255), thickness=2)
for u, v in zip(detected_corners[img_idx][0::2], detected_corners[img_idx][1::2]):
u = u.astype(np.float32)
v = v.astype(np.float32)
cv2.circle(image, (u,v), radius=5, color=(0,255,0), thickness=2)
cv2.imshow('img', image)
cv2.waitKey(34) # 30 FPS
plotTrajectory3D(M, fig)
fname = 'my_results' + '/' + file_list[img_idx][28:28+4] + '.png'
plt.savefig(fname) # to create animation, save all of the figures into my_results directory, then run animate.py, which will produce a video
| [
4,
6,
7,
8,
10
] |
1,207 | 480e6ae9eee70b2da58ca5624a43d8f5dcae1d33 | <mask token>
class SecondaryStructureExtractorTest(unittest.TestCase):
<mask token>
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(
StructureToPolymerChains()).filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
<mask token>
| <mask token>
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster('local[*]').setAppName(
'secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ['1STP', '4HHB']
self.pdb = download_mmtf_files(pdbIds, self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(
StructureToPolymerChains()).filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
<mask token>
| <mask token>
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster('local[*]').setAppName(
'secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ['1STP', '4HHB']
self.pdb = download_mmtf_files(pdbIds, self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(
StructureToPolymerChains()).filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
| import unittest
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.datasets import secondaryStructureExtractor
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.mappers import StructureToPolymerChains
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster('local[*]').setAppName(
'secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ['1STP', '4HHB']
self.pdb = download_mmtf_files(pdbIds, self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(
StructureToPolymerChains()).filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
import unittest
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.datasets import secondaryStructureExtractor
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.mappers import StructureToPolymerChains
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster("local[*]").setAppName('secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ["1STP","4HHB"]
self.pdb = download_mmtf_files(pdbIds,self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()) \
.flatMap(StructureToPolymerChains()) \
.filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
| [
3,
4,
5,
6,
7
] |
1,208 | 3b959481f7c818ec35b8af174b1982954b4c72eb | <mask token>
class RegistrationFormCaseInsensitive(RegistrationForm):
<mask token>
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[User.USERNAME_FIELD].validators.append(validators.
CaseInsensitiveUnique(User, User.USERNAME_FIELD, validators.
DUPLICATE_USERNAME))
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput, label=_(
'I have read and agree to the Terms of Service'), error_messages={
'required': validators.TOS_REQUIRED})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
self.fields[email_field].validators.append(validators.
CaseInsensitiveUnique(User, email_field, validators.
DUPLICATE_EMAIL))
| <mask token>
class RegistrationForm(UserCreationForm):
<mask token>
class Meta(UserCreationForm.Meta):
fields = [User.USERNAME_FIELD, User.get_email_field_name(),
'password1', 'password2']
error_css_class = 'error'
required_css_class = 'required'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
if hasattr(self, 'reserved_names'):
reserved_names = self.reserved_names
else:
reserved_names = validators.DEFAULT_RESERVED_NAMES
username_validators = [validators.ReservedNameValidator(
reserved_names), validators.validate_confusables]
self.fields[User.USERNAME_FIELD].validators.extend(username_validators)
self.fields[email_field].validators = [validators.
HTML5EmailValidator(), validators.validate_confusables_email]
self.fields[email_field].required = True
class RegistrationFormCaseInsensitive(RegistrationForm):
"""
Subclass of ``RegistrationForm`` enforcing case-insensitive
uniqueness of usernames.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[User.USERNAME_FIELD].validators.append(validators.
CaseInsensitiveUnique(User, User.USERNAME_FIELD, validators.
DUPLICATE_USERNAME))
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput, label=_(
'I have read and agree to the Terms of Service'), error_messages={
'required': validators.TOS_REQUIRED})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
self.fields[email_field].validators.append(validators.
CaseInsensitiveUnique(User, email_field, validators.
DUPLICATE_EMAIL))
| <mask token>
User = get_user_model()
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should take care when overriding ``save()`` to respect
the ``commit=False`` argument, as several registration workflows
will make use of it to create inactive user accounts.
"""
class Meta(UserCreationForm.Meta):
fields = [User.USERNAME_FIELD, User.get_email_field_name(),
'password1', 'password2']
error_css_class = 'error'
required_css_class = 'required'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
if hasattr(self, 'reserved_names'):
reserved_names = self.reserved_names
else:
reserved_names = validators.DEFAULT_RESERVED_NAMES
username_validators = [validators.ReservedNameValidator(
reserved_names), validators.validate_confusables]
self.fields[User.USERNAME_FIELD].validators.extend(username_validators)
self.fields[email_field].validators = [validators.
HTML5EmailValidator(), validators.validate_confusables_email]
self.fields[email_field].required = True
class RegistrationFormCaseInsensitive(RegistrationForm):
"""
Subclass of ``RegistrationForm`` enforcing case-insensitive
uniqueness of usernames.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[User.USERNAME_FIELD].validators.append(validators.
CaseInsensitiveUnique(User, User.USERNAME_FIELD, validators.
DUPLICATE_USERNAME))
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput, label=_(
'I have read and agree to the Terms of Service'), error_messages={
'required': validators.TOS_REQUIRED})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
self.fields[email_field].validators.append(validators.
CaseInsensitiveUnique(User, email_field, validators.
DUPLICATE_EMAIL))
| <mask token>
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import gettext_lazy as _
from . import validators
User = get_user_model()
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should take care when overriding ``save()`` to respect
the ``commit=False`` argument, as several registration workflows
will make use of it to create inactive user accounts.
"""
class Meta(UserCreationForm.Meta):
fields = [User.USERNAME_FIELD, User.get_email_field_name(),
'password1', 'password2']
error_css_class = 'error'
required_css_class = 'required'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
if hasattr(self, 'reserved_names'):
reserved_names = self.reserved_names
else:
reserved_names = validators.DEFAULT_RESERVED_NAMES
username_validators = [validators.ReservedNameValidator(
reserved_names), validators.validate_confusables]
self.fields[User.USERNAME_FIELD].validators.extend(username_validators)
self.fields[email_field].validators = [validators.
HTML5EmailValidator(), validators.validate_confusables_email]
self.fields[email_field].required = True
class RegistrationFormCaseInsensitive(RegistrationForm):
"""
Subclass of ``RegistrationForm`` enforcing case-insensitive
uniqueness of usernames.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[User.USERNAME_FIELD].validators.append(validators.
CaseInsensitiveUnique(User, User.USERNAME_FIELD, validators.
DUPLICATE_USERNAME))
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput, label=_(
'I have read and agree to the Terms of Service'), error_messages={
'required': validators.TOS_REQUIRED})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
self.fields[email_field].validators.append(validators.
CaseInsensitiveUnique(User, email_field, validators.
DUPLICATE_EMAIL))
| """
Forms and validation code for user registration.
Note that all of these forms assume your user model is similar in
structure to Django's default User class. If your user model is
significantly different, you may need to write your own form class;
see the documentation for notes on custom user models with
django-registration.
"""
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import gettext_lazy as _
from . import validators
User = get_user_model()
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should take care when overriding ``save()`` to respect
the ``commit=False`` argument, as several registration workflows
will make use of it to create inactive user accounts.
"""
# pylint: disable=too-few-public-methods
class Meta(UserCreationForm.Meta):
fields = [
User.USERNAME_FIELD,
User.get_email_field_name(),
"password1",
"password2",
]
error_css_class = "error"
required_css_class = "required"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
if hasattr(self, "reserved_names"):
reserved_names = self.reserved_names
else:
reserved_names = validators.DEFAULT_RESERVED_NAMES
username_validators = [
validators.ReservedNameValidator(reserved_names),
validators.validate_confusables,
]
self.fields[User.USERNAME_FIELD].validators.extend(username_validators)
# django-registration's email validation is significantly stricter than Django's
# default email validation, which means that leaving Django's default validation
# on only causes confusion due to duplicate error messages (see GitHub issue
# #238). So we apply only the django-registration validators, not the default
# Django validator, on the email field.
self.fields[email_field].validators = [
validators.HTML5EmailValidator(),
validators.validate_confusables_email,
]
self.fields[email_field].required = True
class RegistrationFormCaseInsensitive(RegistrationForm):
"""
Subclass of ``RegistrationForm`` enforcing case-insensitive
uniqueness of usernames.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[User.USERNAME_FIELD].validators.append(
validators.CaseInsensitiveUnique(
User, User.USERNAME_FIELD, validators.DUPLICATE_USERNAME
)
)
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(
widget=forms.CheckboxInput,
label=_("I have read and agree to the Terms of Service"),
error_messages={"required": validators.TOS_REQUIRED},
)
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
email_field = User.get_email_field_name()
self.fields[email_field].validators.append(
validators.CaseInsensitiveUnique(
User, email_field, validators.DUPLICATE_EMAIL
)
)
| [
8,
12,
14,
15,
16
] |
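The ``RegistrationForm`` docstring above asks subclasses that override ``save()`` to respect the ``commit=False`` argument, because several registration workflows create inactive accounts before persisting them. Below is a minimal sketch of that pattern; it is not part of the original module, and ``CompanyRegistrationForm``, its ``company`` field, and the commented import path are illustrative assumptions.

from django import forms
# from django_registration.forms import RegistrationForm  # assumed import path for the form above

class CompanyRegistrationForm(RegistrationForm):
    # Hypothetical extra field, present only to motivate overriding save().
    company = forms.CharField(max_length=100, required=False)

    def save(self, commit=True):
        # Build the user without touching the database, so callers passing
        # commit=False (e.g. workflows creating inactive accounts) stay in control.
        user = super().save(commit=False)
        if commit:
            user.save()
            # Related rows (such as a profile storing self.cleaned_data['company'])
            # should only be created once the user has actually been saved.
        return user

As with the other form classes, the subclass is used by binding it to request.POST, calling is_valid(), and then save().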
1,209 | cfa7dc295c635bbdf707f1e899c4fbf8ea91df9a | <mask token>
| <mask token>
for line in infile:
line = line.strip()
my_list = line.split(',')
if my_list[0] != 'ball':
continue
batsman = my_list[4]
bowler = my_list[6]
if my_list[9] == 'run out' or my_list[9] == '""' or my_list[9
] == 'retired hurt':
dismissed = '0'
else:
dismissed = '1'
print('%s,%s\t%s\t%s' % (batsman, bowler, dismissed, '1'))
| <mask token>
infile = sys.stdin
for line in infile:
line = line.strip()
my_list = line.split(',')
if my_list[0] != 'ball':
continue
batsman = my_list[4]
bowler = my_list[6]
if my_list[9] == 'run out' or my_list[9] == '""' or my_list[9
] == 'retired hurt':
dismissed = '0'
else:
dismissed = '1'
print('%s,%s\t%s\t%s' % (batsman, bowler, dismissed, '1'))
| import sys
import csv
infile = sys.stdin
for line in infile:
line = line.strip()
my_list = line.split(',')
if my_list[0] != 'ball':
continue
batsman = my_list[4]
bowler = my_list[6]
if my_list[9] == 'run out' or my_list[9] == '""' or my_list[9
] == 'retired hurt':
dismissed = '0'
else:
dismissed = '1'
print('%s,%s\t%s\t%s' % (batsman, bowler, dismissed, '1'))
 | #!/usr/bin/python3
# Reads ball-by-ball match data as CSV from stdin and emits one tab-separated
# line per delivery: the "batsman,bowler" pair, a 0/1 flag for whether the
# bowler is credited with a dismissal, and a constant 1 for counting balls.
import sys
import csv  # imported but unused; the records are parsed with a plain split
infile = sys.stdin
for line in infile:
    line = line.strip()
    my_list = line.split(',')
    # Only "ball" records describe deliveries; skip any other record type.
    if my_list[0] != "ball":
        continue
    batsman = my_list[4]
    bowler = my_list[6]
    # Run outs, retired hurt and empty dismissal fields do not credit the bowler.
    if my_list[9] == 'run out' or my_list[9] == '""' or my_list[9] == "retired hurt":
        dismissed = '0'
    else:
        dismissed = '1'
    print('%s,%s\t%s\t%s' % (batsman,bowler,dismissed,'1'))
| [
0,
1,
2,
3,
4
] |
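The mapper above keys its output on the batsman,bowler pair and emits two tab-separated values: a 0/1 flag for whether the bowler is credited with the dismissal, and a constant 1 for counting deliveries. The sketch below drives it with made-up input; only columns 0, 4, 6 and 9 matter to the script, the other field values are invented, and the script is assumed to be saved as mapper.py.

import subprocess, sys

sample = (
    'ball,1,2017,0.1,AB Kumar,non-striker,CD Singh,0,0,caught,fielder\n'  # wicket -> dismissed flag 1
    'ball,1,2017,0.2,AB Kumar,non-striker,CD Singh,4,0,"",none\n'         # no dismissal -> dismissed flag 0
    'info,city,Bangalore\n'                                               # not a "ball" record, skipped
)
out = subprocess.run([sys.executable, 'mapper.py'],
                     input=sample, capture_output=True, text=True).stdout
print(out)
# Expected output (tab characters shown as <TAB>):
# AB Kumar,CD Singh<TAB>1<TAB>1
# AB Kumar,CD Singh<TAB>0<TAB>1

A downstream reducer could, for example, sum the second column per key and divide by the sum of the third to estimate how often a given bowler dismisses a given batsman.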
1,210 | 7a01bffa5d7f0d5ecff57c97478f2cf5e9a27538 | <mask token>
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
            l2 = '------ Text 2 Image ------\n'
            logs += l2
            print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
        Alignment evaluator: dictionary with alignment logits and labels (whether the text/image pair is aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
<mask token>
| <mask token>
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
            l2 = '------ Text 2 Image ------\n'
            logs += l2
            print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
        Alignment evaluator: dictionary with alignment logits and labels (whether the text/image pair is aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
text2image retrieval:
Query = Text
Paired with: 1 positive image, 100 negative images
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
for p in range(len_neg_inputs):
neg_patches_sample = neg_patches[:, p, :, :]
embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_scores_query = evaluator.text2img_scores(input_ids=input_ids,
embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,
att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
random_patches=False):
torch.cuda.empty_cache()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle
=False)
if pretrained_model != None:
evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
return_dict=True)
else:
evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
return_dict=True)
evaluator.to(device)
evaluator.eval()
query_dict_im2txt = {}
query_dict_txt2im = {}
running_acc_alignment_im2txt = 0.0
running_acc_pred_im2txt = 0.0
running_acc_alignment_txt2im = 0.0
running_acc_pred_txt2im = 0.0
with torch.no_grad():
for i, (patches, neg_patches, input_ids, attention_mask,
neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(
dataloader)):
is_paired = 1.0
im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_im2txt += im2txt_pred_acc
running_acc_alignment_im2txt += im2txt_alig_acc
query_dict_im2txt[img_name[0]] = im2txt_query_scores
txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_txt2im += txt2im_pred_acc
running_acc_alignment_txt2im += txt2im_alig_acc
query_dict_txt2im[img_name[0]] = txt2im_query_scores
im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)
im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(
dataloader)
txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)
txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(
dataloader)
print()
results = ''
log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n'
log2 = evaluator.rank_at_K(query_dict_im2txt, True)
log3 = '---- Accuracy in token predictions: {} -----\n'.format(
im2txt_test_set_accuracy_pred)
log4 = '---- Accuracy in text-image alignment: {} -----\n'.format(
im2txt_test_set_accuracy_alig)
print(log1)
print(log2)
print(log3)
print(log4)
print()
log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n'
log6 = evaluator.rank_at_K(query_dict_txt2im, False)
log7 = '---- Accuracy in token predictions: {} -----\n'.format(
txt2im_test_set_accuracy_pred)
log8 = '---- Accuracy in text-image alignment: {} -----\n'.format(
txt2im_test_set_accuracy_alig)
print(log5)
print(log6)
print(log7)
print(log8)
results += log1
results += log2
results += log3
results += log4
results += log5
results += log6
results += log7
results += log8
save_json(save_file_name, results)
<mask token>
| <mask token>
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
            l2 = '------ Text 2 Image ------\n'
            logs += l2
            print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
        Alignment evaluator: dictionary with alignment logits and labels (whether the text/image pair is aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
text2image retrieval:
Query = Text
Paired with: 1 positive image, 100 negative images
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
for p in range(len_neg_inputs):
neg_patches_sample = neg_patches[:, p, :, :]
embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_scores_query = evaluator.text2img_scores(input_ids=input_ids,
embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,
att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
random_patches=False):
torch.cuda.empty_cache()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle
=False)
if pretrained_model != None:
evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
return_dict=True)
else:
evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
return_dict=True)
evaluator.to(device)
evaluator.eval()
query_dict_im2txt = {}
query_dict_txt2im = {}
running_acc_alignment_im2txt = 0.0
running_acc_pred_im2txt = 0.0
running_acc_alignment_txt2im = 0.0
running_acc_pred_txt2im = 0.0
with torch.no_grad():
for i, (patches, neg_patches, input_ids, attention_mask,
neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(
dataloader)):
is_paired = 1.0
im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_im2txt += im2txt_pred_acc
running_acc_alignment_im2txt += im2txt_alig_acc
query_dict_im2txt[img_name[0]] = im2txt_query_scores
txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_txt2im += txt2im_pred_acc
running_acc_alignment_txt2im += txt2im_alig_acc
query_dict_txt2im[img_name[0]] = txt2im_query_scores
im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)
im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(
dataloader)
txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)
txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(
dataloader)
print()
results = ''
log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n'
log2 = evaluator.rank_at_K(query_dict_im2txt, True)
log3 = '---- Accuracy in token predictions: {} -----\n'.format(
im2txt_test_set_accuracy_pred)
log4 = '---- Accuracy in text-image alignment: {} -----\n'.format(
im2txt_test_set_accuracy_alig)
print(log1)
print(log2)
print(log3)
print(log4)
print()
log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n'
log6 = evaluator.rank_at_K(query_dict_txt2im, False)
log7 = '---- Accuracy in token predictions: {} -----\n'.format(
txt2im_test_set_accuracy_pred)
log8 = '---- Accuracy in text-image alignment: {} -----\n'.format(
txt2im_test_set_accuracy_alig)
print(log5)
print(log6)
print(log7)
print(log8)
results += log1
results += log2
results += log3
results += log4
results += log5
results += log6
results += log7
results += log8
save_json(save_file_name, results)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate FashionBert')
parser.add_argument('--path_to_train_dataset', help=
'Absolute path to .pkl file used for training')
parser.add_argument('--path_to_pretrained_model', help=
'Path to pretrained model', default=None)
parser.add_argument('--save_test_set', help=
'Name to save test set .pkl', default='test_set.pkl')
parser.add_argument('--save_results_name', help=
'Name to save file with results', default='results.json')
parser.add_argument('--random_patches', help=
'using random_patches True or False', default=False)
args = parser.parse_args()
print('Processing the dataset...')
dataset = EvaluationDataset(args.path_to_train_dataset)
print('Done!')
print('\nGetting aligned pairs...')
get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
print('Loading dataset...')
dataset = Evaluation_negpairs(args.save_test_set)
print('Starting evaluation...')
test(dataset, device, args.save_results_name, pretrained_model=args.
path_to_pretrained_model, random_patches=args.random_patches)
print('Done!!!')
| import torch, torchvision
import torch.nn.functional as F
import transformers
from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertPreTrainingHeads
from utils import construct_bert_input, EvaluationDataset, save_json
from fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size,
eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self, input_ids, embeds, att_mask, embeds_n, att_mask_n
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds.to(device),
attention_mask=att_mask.to(device), labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device), only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids.to(device), is_paired=torch.tensor(False).to(
device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids, 'score': score_n, 'label':
False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p,
input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device), labels=input_ids_p.to(
device), is_paired=torch.tensor(True).to(device),
only_alignment=True)
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item()
score_pos_dict = {'text': input_ids_p, 'score': score_p, 'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(embeds=embeds_n[n].to(
device), attention_mask=att_mask_n[n].to(device), labels=
input_ids_n[n].to(device), is_paired=torch.tensor(False).to
(device), only_alignment=True)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item()
score_neg_dict = {'text': input_ids_n[n], 'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key
=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
            l2 = '------ Text 2 Image ------\n'
            logs += l2
            print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, found / len(
dict_scores.keys()))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(self, embeds, attention_mask, labels=None,
is_paired=None, only_alignment=False):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds, attention_mask=
attention_mask, return_dict=True)
sequence_output = outputs.last_hidden_state
pooler_output = outputs.pooler_output
text_output = sequence_output[:, :labels.shape[1], :]
image_output = sequence_output[:, labels.shape[1]:, :]
prediction_scores, alignment_scores = self.cls(text_output,
pooler_output)
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator,
alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
        Alignment evaluator: dictionary with alignment logits and labels (whether the text/image pair is aligned)
"""
text_pred_logits = text_evaluator['text_pred_logits']
text_labels = text_evaluator['text_labels']
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten()
alig_pred_logits = alignment_evaluator['alignment_logits']
alig_labels = alignment_evaluator['alignment_labels']
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten()
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0,
embeds_neg.shape[1] - neg_input_id_sample.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
all_scores_query = evaluator.img2text_scores(input_ids_p=input_ids,
embeds_p=embeds, att_mask_p=attention_mask_mm, input_ids_n=
all_neg_inputs, embeds_n=all_embeds_neg, att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches):
"""
text2image retrieval:
Query = Text
Paired with: 1 positive image, 100 negative images
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=
device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg = []
all_att_mask = []
for p in range(len_neg_inputs):
neg_patches_sample = neg_patches[:, p, :, :]
embeds_neg = construct_bert_input(neg_patches_sample, input_ids,
evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] -
input_ids.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_scores_query = evaluator.text2img_scores(input_ids=input_ids,
embeds=embeds, att_mask=attention_mask_mm, embeds_n=all_embeds_neg,
att_mask_n=all_att_mask)
txt_acc, alig_acc = evaluator.get_scores_and_metrics(embeds,
attention_mask_mm, labels=input_ids, is_paired=is_paired,
only_alignment=False)
return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None,
random_patches=False):
torch.cuda.empty_cache()
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle
=False)
if pretrained_model != None:
evaluator = FashionbertEvaluator.from_pretrained(pretrained_model,
return_dict=True)
else:
evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased',
return_dict=True)
evaluator.to(device)
evaluator.eval()
query_dict_im2txt = {}
query_dict_txt2im = {}
running_acc_alignment_im2txt = 0.0
running_acc_pred_im2txt = 0.0
running_acc_alignment_txt2im = 0.0
running_acc_pred_txt2im = 0.0
with torch.no_grad():
for i, (patches, neg_patches, input_ids, attention_mask,
neg_input_ids, neg_attention_mask, img_name) in enumerate(tqdm(
dataloader)):
is_paired = 1.0
im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_im2txt += im2txt_pred_acc
running_acc_alignment_im2txt += im2txt_alig_acc
query_dict_im2txt[img_name[0]] = im2txt_query_scores
txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(
patches, neg_patches, input_ids, is_paired, attention_mask,
neg_input_ids, neg_attention_mask, evaluator, random_patches)
running_acc_pred_txt2im += txt2im_pred_acc
running_acc_alignment_txt2im += txt2im_alig_acc
query_dict_txt2im[img_name[0]] = txt2im_query_scores
im2txt_test_set_accuracy_pred = running_acc_pred_im2txt / len(dataloader)
im2txt_test_set_accuracy_alig = running_acc_alignment_im2txt / len(
dataloader)
txt2im_test_set_accuracy_pred = running_acc_pred_txt2im / len(dataloader)
txt2im_test_set_accuracy_alig = running_acc_alignment_txt2im / len(
dataloader)
print()
results = ''
log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n'
log2 = evaluator.rank_at_K(query_dict_im2txt, True)
log3 = '---- Accuracy in token predictions: {} -----\n'.format(
im2txt_test_set_accuracy_pred)
log4 = '---- Accuracy in text-image alignment: {} -----\n'.format(
im2txt_test_set_accuracy_alig)
print(log1)
print(log2)
print(log3)
print(log4)
print()
log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n'
log6 = evaluator.rank_at_K(query_dict_txt2im, False)
log7 = '---- Accuracy in token predictions: {} -----\n'.format(
txt2im_test_set_accuracy_pred)
log8 = '---- Accuracy in text-image alignment: {} -----\n'.format(
txt2im_test_set_accuracy_alig)
print(log5)
print(log6)
print(log7)
print(log8)
results += log1
results += log2
results += log3
results += log4
results += log5
results += log6
results += log7
results += log8
save_json(save_file_name, results)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate FashionBert')
parser.add_argument('--path_to_train_dataset', help=
'Absolute path to .pkl file used for training')
parser.add_argument('--path_to_pretrained_model', help=
'Path to pretrained model', default=None)
parser.add_argument('--save_test_set', help=
'Name to save test set .pkl', default='test_set.pkl')
parser.add_argument('--save_results_name', help=
'Name to save file with results', default='results.json')
parser.add_argument('--random_patches', help=
'using random_patches True or False', default=False)
args = parser.parse_args()
print('Processing the dataset...')
dataset = EvaluationDataset(args.path_to_train_dataset)
print('Done!')
print('\nGetting aligned pairs...')
get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
print('Loading dataset...')
dataset = Evaluation_negpairs(args.save_test_set)
print('Starting evaluation...')
test(dataset, device, args.save_results_name, pretrained_model=args.
path_to_pretrained_model, random_patches=args.random_patches)
print('Done!!!')
| import torch, torchvision
import torch.nn.functional as F
import transformers
from transformers import BertTokenizer, BertModel
from transformers.models.bert.modeling_bert import BertPreTrainingHeads
from utils import construct_bert_input, EvaluationDataset, save_json
from fashionbert_evaluator_parser import Evaluation_negpairs, get_all_paired_test_set
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class FashionbertEvaluator(transformers.BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.im_to_embedding = torch.nn.Linear(2048, 768)
self.im_to_embedding_norm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.cls = BertPreTrainingHeads(config)
self.init_weights()
def text2img_scores(self,
input_ids,
embeds,
att_mask,
embeds_n, # list
att_mask_n, # list
):
"""
INPUTS:
input_ids [1, 448]
embeds: [1, 512, 768]
att_mask: [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
# Score for positive
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(
embeds=embeds.to(device),
attention_mask=att_mask.to(device),
labels=input_ids.to(device),
is_paired=torch.tensor(True).to(device),
only_alignment=True,
)
# label = score_pos[1]
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item() # confidence that is actually positive
score_pos_dict = {'text': input_ids,
'score': score_p,
'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
# Scores for negative
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(
embeds=embeds_n[n].to(device),
attention_mask=att_mask_n[n].to(device),
labels=input_ids.to(device),
is_paired=torch.tensor(False).to(device),
only_alignment=True,
)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item() # confidence that is actually positive
score_neg_dict = {'text': input_ids,
'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]
return S
def img2text_scores(self, input_ids_p, embeds_p, att_mask_p, input_ids_n, embeds_n, att_mask_n):
"""
INPUTS:
input_ids_p : [1, 448]
embeds_p: [1, 512, 768]
att_mask_p: [1, 448]
input_ids_n: list with 100 of [1, 448]
embeds_n: list with 100 of [1, 512, 768]
att_mask_n: list with 100 of [1, 448]
"""
# Score for positive
query_dict_scores = []
query_scores = []
query_labels = []
score_pos = self.get_scores_and_metrics(
embeds=embeds_p.to(device),
attention_mask=att_mask_p.to(device),
labels=input_ids_p.to(device),
is_paired=torch.tensor(True).to(device),
only_alignment=True,
)
# label = score_pos[1]
score_p = score_pos[0].squeeze()
score_p = score_p[1].detach().item() # confidence that is actually positive
score_pos_dict = {'text': input_ids_p,
'score': score_p,
'label': True}
query_dict_scores.append(score_pos_dict)
query_scores.append(score_p)
query_labels.append(True)
# Scores for negative
for n in range(len(embeds_n)):
score_neg = self.get_scores_and_metrics(
embeds=embeds_n[n].to(device),
attention_mask=att_mask_n[n].to(device),
labels=input_ids_n[n].to(device),
is_paired=torch.tensor(False).to(device),
only_alignment=True,
)
score_n = score_neg[0].squeeze()
score_n = score_n[1].detach().item() # confidence that is actually positive
score_neg_dict = {'text': input_ids_n[n],
'score': score_n,
'label': False}
query_dict_scores.append(score_neg_dict)
query_scores.append(score_n)
query_labels.append(False)
# print(evaluator.tokenizer.convert_ids_to_tokens(ids))
S = [(s, l) for s, l in sorted(zip(query_scores, query_labels), key=lambda x: x[0], reverse=True)]
return S
def rank_at_K(self, dict_scores, img2text=True):
logs = ''
if img2text:
l1 = '------ Image 2 Text ------\n'
logs += l1
print(l1)
else:
            l2 = '------ Text 2 Image ------\n'
            logs += l2
            print(l2)
Ks = [1, 5, 10]
for K in Ks:
found = 0
for key, val in dict_scores.items():
tmp_range = K if K < len(val) else len(val)
for i in range(tmp_range):
score, label = val[i]
if label:
found += 1
break
l3 = '------ Rank @ {} = {} ------\n'.format(K, (found / len(dict_scores.keys())))
logs += l3
print(l3)
return logs
def get_scores_and_metrics(
self,
embeds, # text + image embedded
attention_mask, # text + image attention mask
labels=None, # [batch, 448]
is_paired=None, # [batch]
only_alignment=False,
):
batch_size = embeds.shape[0]
seq_length = embeds.shape[1]
hidden_dim = embeds.shape[2]
embeds = embeds.to(device)
attention_mask = attention_mask.to(device)
outputs = self.bert(inputs_embeds=embeds,
attention_mask=attention_mask,
return_dict=True)
sequence_output = outputs.last_hidden_state # [batch, seq_length, hidden_size]
pooler_output = outputs.pooler_output # [batch_size, hidden_size] last layer of hidden-state of first token (CLS) + linear layer + tanh
# hidden states corresponding to the text part
text_output = sequence_output[:, :labels.shape[1], :] # [batch, 448, 768]
# hidden states corresponding to the image part
image_output = sequence_output[:, labels.shape[1]:, :] # [batch, 64, 768]
### FOR TEXT
# Predict the masked text tokens and alignment scores (whether image and text match)
prediction_scores, alignment_scores = self.cls(text_output, pooler_output)
# prediction score is [batch, 448, vocab_size = 30522]
# aligment score is [batch, 2] 2 with logits corresponding to 1 and 0
if only_alignment:
return alignment_scores, is_paired
text_evaluator = {'text_pred_logits': prediction_scores,
'text_labels': labels}
alignment_evaluator = {'alignment_logits': alignment_scores,
'alignment_labels': is_paired}
text_acc, alig_acc = self.accuracy_scores(text_evaluator, alignment_evaluator)
return text_acc, alig_acc
def accuracy_scores(self, text_evaluator, alignment_evaluator):
"""
Text evaluator: dictionary with preds and labels (aligned)
        Alignment evaluator: dictionary with alignment logits and labels (whether the text/image pair is aligned)
"""
# Text
text_pred_logits = text_evaluator['text_pred_logits'] # [num_aligned, 448, vocab_size]
text_labels = text_evaluator['text_labels'] # [num_aligned, 448]
text_preds_logits = text_pred_logits.detach().cpu().numpy()
text_labels = text_labels.cpu().numpy().flatten()
text_preds = np.argmax(text_preds_logits, axis=2).flatten() # [num_algined, 448]
# Alignment
alig_pred_logits = alignment_evaluator['alignment_logits'] # [1, 2]
alig_labels = alignment_evaluator['alignment_labels'] # [2]
alig_pred_logits = alig_pred_logits.detach().cpu().numpy()
alig_labels = np.asarray([alig_labels])
# alig_labels = alig_labels.double().cpu().numpy().flatten()
alig_preds = np.argmax(alig_pred_logits, axis=1).flatten() # [1, 2]
text_acc = accuracy_score(text_labels, text_preds)
alig_acc = accuracy_score(alig_labels, alig_preds)
return text_acc, alig_acc
def image2text(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,
evaluator, random_patches):
"""
image2text retrieval:
Query = Image
Paired with: 1 positive text, 100 negative texts
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
embeds = construct_bert_input(patches, input_ids, evaluator, device=device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1)
# NEGATIVE SAMPLE # [batch, 100, 448]
all_embeds_neg = []
all_att_mask = []
all_neg_inputs = []
for j in range(len_neg_inputs):
neg_input_id_sample = neg_input_ids[:, j, :] # [1, 448]
neg_attention_mask_sample = neg_attention_mask[:, j, :]
embeds_neg = construct_bert_input(patches, neg_input_id_sample, evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(neg_attention_mask_sample, (0, embeds_neg.shape[1] - neg_input_id_sample.shape[1]),
value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
all_neg_inputs.append(neg_input_id_sample.detach())
# Now I have all joint embeddings for 1 positive sample and 100 neg samples
all_scores_query = evaluator.img2text_scores(
input_ids_p=input_ids,
embeds_p=embeds,
att_mask_p=attention_mask_mm,
input_ids_n=all_neg_inputs,
embeds_n=all_embeds_neg,
att_mask_n=all_att_mask)
# Accuracy: only in positive example
txt_acc, alig_acc = evaluator.get_scores_and_metrics(
embeds, # text + image embedded
attention_mask_mm,
labels=input_ids, # [batch, 448]
is_paired=is_paired, # [batch]
only_alignment=False,
)
return all_scores_query, txt_acc, alig_acc
def text2image(patches, neg_patches, input_ids, is_paired, attention_mask, neg_input_ids, neg_attention_mask,
evaluator, random_patches):
"""
text2image retrieval:
Query = Text
Paired with: 1 positive image, 100 negative images
"""
im_seq_len = patches.shape[1]
bs = input_ids.shape[0]
len_neg_inputs = neg_input_ids.shape[1]
# before constructing bert, att mask is 448 long
# POSITIVE IMAGE
embeds = construct_bert_input(patches, input_ids, evaluator, device=device, random_patches=random_patches)
attention_mask_mm = F.pad(attention_mask, (0, embeds.shape[1] - input_ids.shape[1]), value=1) # [1, 512]
# NEGATIVE SAMPLES
all_embeds_neg = []
all_att_mask = []
for p in range(len_neg_inputs):
neg_patches_sample = neg_patches[:, p, :, :]
embeds_neg = construct_bert_input(neg_patches_sample, input_ids, evaluator, device=device, random_patches=random_patches)
attention_mask_neg = F.pad(attention_mask, (0, embeds_neg.shape[1] - input_ids.shape[1]), value=1)
all_embeds_neg.append(embeds_neg)
all_att_mask.append(attention_mask_neg)
# Now I have all joint embeddings for 1 positive sample and 100 neg samples
all_scores_query = evaluator.text2img_scores(
input_ids=input_ids,
embeds=embeds,
att_mask=attention_mask_mm,
embeds_n=all_embeds_neg, # list
att_mask_n=all_att_mask) # list
# Accuracy: only in positive example
txt_acc, alig_acc = evaluator.get_scores_and_metrics(
embeds, # text + image embedded
attention_mask_mm, # [batch,
labels=input_ids, # [batch, 448]
is_paired=is_paired, # [batch]
only_alignment=False,
)
return all_scores_query, txt_acc, alig_acc
def test(dataset, device, save_file_name, pretrained_model=None, random_patches=False):
torch.cuda.empty_cache()
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
shuffle=False,
)
    if pretrained_model is not None:
evaluator = FashionbertEvaluator.from_pretrained(pretrained_model, return_dict=True)
else:
evaluator = FashionbertEvaluator.from_pretrained('bert-base-uncased', return_dict=True)
evaluator.to(device)
evaluator.eval()
query_dict_im2txt = {}
query_dict_txt2im = {}
running_acc_alignment_im2txt = 0.0
running_acc_pred_im2txt = 0.0
running_acc_alignment_txt2im = 0.0
running_acc_pred_txt2im = 0.0
with torch.no_grad():
for i, (
patches, neg_patches, input_ids, attention_mask, neg_input_ids, neg_attention_mask, img_name) in enumerate(
tqdm(dataloader)):
# ****** Shapes ********
# input_ids shape: [1, 448]
# neg_input_ids shape: [1, NUM_SAMPLES=100, 448]
# neg_patches: [1, NUM_SAMPLES=100, 64, 2048]
# IMAGE 2 TEXT
is_paired = 1.
# print('im2text..')
im2txt_query_scores, im2txt_pred_acc, im2txt_alig_acc = image2text(patches, neg_patches, input_ids,
is_paired, attention_mask,
neg_input_ids, neg_attention_mask,
evaluator, random_patches)
# print('done')
# Accuracies
running_acc_pred_im2txt += im2txt_pred_acc
running_acc_alignment_im2txt += im2txt_alig_acc
# For Rank @ K
query_dict_im2txt[img_name[0]] = im2txt_query_scores
# TEXT 2 IMAGE
# print('txt2img..')
txt2im_query_scores, txt2im_pred_acc, txt2im_alig_acc = text2image(patches, neg_patches, input_ids,
is_paired, attention_mask,
neg_input_ids, neg_attention_mask,
evaluator, random_patches)
# print('done')
# Accuracies
running_acc_pred_txt2im += txt2im_pred_acc
running_acc_alignment_txt2im += txt2im_alig_acc
# For Rank @ K
query_dict_txt2im[img_name[0]] = txt2im_query_scores
im2txt_test_set_accuracy_pred = (running_acc_pred_im2txt / len(dataloader))
im2txt_test_set_accuracy_alig = (running_acc_alignment_im2txt / len(dataloader))
txt2im_test_set_accuracy_pred = (running_acc_pred_txt2im / len(dataloader))
txt2im_test_set_accuracy_alig = (running_acc_alignment_txt2im / len(dataloader))
print()
results = ''
log1 = '---- IMAGE 2 TEXT EVALUATIONS ---------------------\n'
log2 = evaluator.rank_at_K(query_dict_im2txt, True)
log3 = '---- Accuracy in token predictions: {} -----\n'.format(im2txt_test_set_accuracy_pred)
log4 = '---- Accuracy in text-image alignment: {} -----\n'.format(im2txt_test_set_accuracy_alig)
print(log1)
print(log2)
print(log3)
print(log4)
print()
log5 = '---- TEXT 2 IMAGE EVALUATIONS ---------------------\n'
log6 = evaluator.rank_at_K(query_dict_txt2im, False)
log7 = '---- Accuracy in token predictions: {} -----\n'.format(txt2im_test_set_accuracy_pred)
log8 = '---- Accuracy in text-image alignment: {} -----\n'.format(txt2im_test_set_accuracy_alig)
print(log5)
print(log6)
print(log7)
print(log8)
results += log1
results += log2
results += log3
results += log4
results += log5
results += log6
results += log7
results += log8
save_json(save_file_name, results)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate FashionBert')
parser.add_argument('--path_to_train_dataset', help='Absolute path to .pkl file used for training')
parser.add_argument('--path_to_pretrained_model', help='Path to pretrained model', default=None)
parser.add_argument('--save_test_set', help='Name to save test set .pkl', default='test_set.pkl')
parser.add_argument('--save_results_name', help='Name to save file with results', default='results.json')
parser.add_argument('--random_patches', help='using random_patches True or False', default=False)
args = parser.parse_args()
# 1) Builds the 1000 sample dataset. This corresponds to the fashionibert_evaluator_parser file
print('Processing the dataset...')
dataset = EvaluationDataset(args.path_to_train_dataset)
# savefile_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'
print('Done!')
print('\nGetting aligned pairs...')
get_all_paired_test_set(dataset, args.save_test_set, num_samples=1000)
# print('Done!')
# 2) Evaluate-
# eval_set_path = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/evaluation_set_fashionbert_vanilla.pkl'
# path_to_trained_model = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/'
# path_to_save_json = '../../../__fashionbert_trained/fashionbert_vanilla_adaptive/results.json'
print('Loading dataset...')
dataset = Evaluation_negpairs(args.save_test_set)
print('Starting evaluation...')
# test(dataset, device, args.num_subsamples, args.save_file_name, args.path_to_pretrained_model)
test(dataset, device, args.save_results_name, pretrained_model=args.path_to_pretrained_model, random_patches=args.random_patches)
print('Done!!!')
| [
8,
10,
12,
13,
14
] |
1,211 | 7c60ae58b26ae63ba7c78a28b72192373cc05a86 | <mask token>
| <mask token>
while True:
for i in range(0, 8):
temp = str(23 + i) + '-05-21'
for pincode in pincodes:
req = Request(
'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='
+ pincode + '&date=' + temp, headers={'User-Agent':
'Mozilla/5.0'})
webpage = urlopen(req).read()
data = json.loads(webpage)
for center in data['centers']:
for session in center['sessions']:
print('\t', center['name'])
print('\t', center['address'])
print('\t Price: ', center['fee_type'])
print('\t', session['vaccine'])
print('\t Age limit:', session['min_age_limit'])
print('\t Available Capacity: ', session[
'available_capacity'])
print(
'////////////////////////////////////////////////////')
if int(session['available_capacity']) > 0:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login('[email protected]',
'password')
if pincode == '784164':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
elif pincode == '781017':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
else:
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.quit()
time.sleep(20)
| <mask token>
today = date.today().strftime('%d-%m-%y')
count = 0
pincodes = ['784164', '781017', '784161', '787001']
date = 0
temp = str(14) + '-05-21'
while True:
for i in range(0, 8):
temp = str(23 + i) + '-05-21'
for pincode in pincodes:
req = Request(
'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='
+ pincode + '&date=' + temp, headers={'User-Agent':
'Mozilla/5.0'})
webpage = urlopen(req).read()
data = json.loads(webpage)
for center in data['centers']:
for session in center['sessions']:
print('\t', center['name'])
print('\t', center['address'])
print('\t Price: ', center['fee_type'])
print('\t', session['vaccine'])
print('\t Age limit:', session['min_age_limit'])
print('\t Available Capacity: ', session[
'available_capacity'])
print(
'////////////////////////////////////////////////////')
if int(session['available_capacity']) > 0:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login('[email protected]',
'password')
if pincode == '784164':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
elif pincode == '781017':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
else:
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.quit()
time.sleep(20)
| import smtplib
import requests
import datetime
import json
import time
from datetime import date
from urllib.request import Request, urlopen
today = date.today().strftime('%d-%m-%y')
count = 0
pincodes = ['784164', '781017', '784161', '787001']
date = 0
temp = str(14) + '-05-21'
while True:
for i in range(0, 8):
temp = str(23 + i) + '-05-21'
for pincode in pincodes:
req = Request(
'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='
+ pincode + '&date=' + temp, headers={'User-Agent':
'Mozilla/5.0'})
webpage = urlopen(req).read()
data = json.loads(webpage)
for center in data['centers']:
for session in center['sessions']:
print('\t', center['name'])
print('\t', center['address'])
print('\t Price: ', center['fee_type'])
print('\t', session['vaccine'])
print('\t Age limit:', session['min_age_limit'])
print('\t Available Capacity: ', session[
'available_capacity'])
print(
'////////////////////////////////////////////////////')
if int(session['available_capacity']) > 0:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login('[email protected]',
'password')
if pincode == '784164':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
elif pincode == '781017':
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
else:
server.sendmail('[email protected]',
'[email protected]',
'Vaccine available , Kindly check your cowin app'
)
server.quit()
time.sleep(20)
| import smtplib
import requests
import datetime
import json
import time
from datetime import date
from urllib.request import Request,urlopen
today = date.today().strftime("%d-%m-%y")
count = 0
pincodes = ["784164","781017","784161","787001"]
date = 0
temp = str(14) + "-05-21"
while True:
for i in range(0,8):
temp = str(23+i) + "-05-21"
for pincode in pincodes:
req = Request(
"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=" + pincode + "&date=" + temp,
headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
data = json.loads(webpage)
for center in data["centers"]:
for session in center["sessions"]:
print("\t", center["name"])
print("\t", center["address"])
print("\t Price: ", center["fee_type"])
print("\t", session["vaccine"])
print("\t Age limit:", session["min_age_limit"])
print("\t Available Capacity: ", session["available_capacity"])
print("////////////////////////////////////////////////////")
if int(session["available_capacity"]) > 0:
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.login("[email protected]", "password")
if pincode == "784164":
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
elif pincode == "781017":
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
else:
server.sendmail("[email protected]", "[email protected]",
"Vaccine available , Kindly check your cowin app")
server.quit()
time.sleep(20)
| [
0,
1,
2,
3,
4
] |
1,212 | 45a57fac564f23253f9d9cd5d0fd820e559c15b9 | <mask token>
class APIAuditAgent(AuditAgent):
<mask token>
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
Capture Trail to endpoint. Internally it transforms JSON
object while calling POST endpoint
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
            custom JSON string required for the POST request body
"""
self._mark_json_trail(jsontrail)
<mask token>
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
        if _resp.status_code != 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
| <mask token>
class APIAuditAgent(AuditAgent):
<mask token>
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
Capture Trail to endpoint. Internally it transforms JSON
object while calling POST endpoint
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
            custom JSON string required for the POST request body
"""
self._mark_json_trail(jsontrail)
def endpoint_response(self) ->Response:
"""
access the response of the endpoint URL
Returns
--------
Response
Http response
"""
return self._resp
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
        if _resp.status_code != 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
| <mask token>
class APIAuditAgent(AuditAgent):
"""
Captures the audit trail using a REST endpoint URL (POST)
Add this agent to Auditor in order to capture audit log to an endpoint.
Note
-----------
1. If user wants to POST custom JSON request body then,
pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)
2. After each call to capture() or capture_custom() latest response is preserved
until next endpoint request.
To get the response, after each invocation please call endpoint_response() to get response
"""
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
Capture Trail to endpoint. Internally it transforms JSON
object while calling POST endpoint
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
            custom JSON string required for the POST request body
"""
self._mark_json_trail(jsontrail)
def endpoint_response(self) ->Response:
"""
access the response of the endpoint URL
Returns
--------
Response
Http response
"""
return self._resp
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
        if _resp.status_code != 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
| import requests
from requests import Response
from auditlogging.Trail import Trail
from utils.Utils import is_empty
from auditlogging.agents.AuditAgent import AuditAgent
class APIAuditAgent(AuditAgent):
"""
Captures the audit trail using a REST endpoint URL (POST)
Add this agent to Auditor in order to capture audit log to an endpoint.
Note
-----------
1. If user wants to POST custom JSON request body then,
pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)
2. After each call to capture() or capture_custom() latest response is preserved
until next endpoint request.
To get the response, after each invocation please call endpoint_response() to get response
"""
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
Capture Trail to endpoint. Internally it transforms JSON
object while calling POST endpoint
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
            custom JSON string required for the POST request body
"""
self._mark_json_trail(jsontrail)
def endpoint_response(self) ->Response:
"""
access the response of the endpoint URL
Returns
--------
Response
Http response
"""
return self._resp
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
        if _resp.status_code != 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
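# Hedged usage sketch (editor's addition, not part of the original module): the class
# docstring above describes the intended flow -- create the agent, capture a trail or a
# raw JSON string, then read back the HTTP response via endpoint_response(). Assumes the
# default endpoint service is actually running; the JSON payload below is an illustrative
# assumption, not a documented schema.
if __name__ == '__main__':
    agent = APIAuditAgent()
    # capture_custom() POSTs the raw JSON string as the request body
    agent.capture_custom('{"actor": "user1", "action": "login"}')
    resp = agent.endpoint_response()
    if resp is not None:
        print(resp.status_code)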
| null | [
8,
9,
10,
11
] |
1,213 | 1e84b28580b97e77394be0490f3d8db3d62a2ccb | <mask token>
def sort_id(movies, titles):
ids = []
for i in titles:
try:
movie_id = MovieDB.objects.get(title=i).id
ids.append((i, movie_id))
except MovieDB.DoesNotExist:
return []
return ids
<mask token>
def sort_name(actors):
names = []
for i in actors:
names.append(str(i.name))
names.sort()
return names
def sort_actor_id(actors, names):
ids = []
for i in names:
try:
actor_id = ActorDB.objects.get(name=i).id
ids.append((i, actor_id))
except ActorDB.DoesNotExist:
return []
return ids
| <mask token>
def user_present(username):
if User.objects.filter(username=username).count():
return True
return False
<mask token>
def sort_id(movies, titles):
ids = []
for i in titles:
try:
movie_id = MovieDB.objects.get(title=i).id
ids.append((i, movie_id))
except MovieDB.DoesNotExist:
return []
return ids
def sort_tv_id(tvs, titles):
ids = []
for i in titles:
try:
tv_id = TVDB.objects.get(title=i).id
ids.append((i, tv_id))
except TVDB.DoesNotExist:
return []
return ids
def sort_name(actors):
names = []
for i in actors:
names.append(str(i.name))
names.sort()
return names
def sort_actor_id(actors, names):
ids = []
for i in names:
try:
actor_id = ActorDB.objects.get(name=i).id
ids.append((i, actor_id))
except ActorDB.DoesNotExist:
return []
return ids
| <mask token>
def user_present(username):
if User.objects.filter(username=username).count():
return True
return False
def sort_title(movies):
titles = []
for i in movies:
titles.append(str(i.title))
titles.sort()
return titles
def sort_id(movies, titles):
ids = []
for i in titles:
try:
movie_id = MovieDB.objects.get(title=i).id
ids.append((i, movie_id))
except MovieDB.DoesNotExist:
return []
return ids
def sort_tv_id(tvs, titles):
ids = []
for i in titles:
try:
tv_id = TVDB.objects.get(title=i).id
ids.append((i, tv_id))
except TVDB.DoesNotExist:
return []
return ids
def sort_name(actors):
names = []
for i in actors:
names.append(str(i.name))
names.sort()
return names
def sort_actor_id(actors, names):
ids = []
for i in names:
try:
actor_id = ActorDB.objects.get(name=i).id
ids.append((i, actor_id))
except ActorDB.DoesNotExist:
return []
return ids
| from django.contrib.auth.models import User
from rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB
def user_present(username):
if User.objects.filter(username=username).count():
return True
return False
def sort_title(movies):
titles = []
for i in movies:
titles.append(str(i.title))
titles.sort()
return titles
def sort_id(movies, titles):
ids = []
for i in titles:
try:
movie_id = MovieDB.objects.get(title=i).id
ids.append((i, movie_id))
except MovieDB.DoesNotExist:
return []
return ids
def sort_tv_id(tvs, titles):
ids = []
for i in titles:
try:
tv_id = TVDB.objects.get(title=i).id
ids.append((i, tv_id))
except TVDB.DoesNotExist:
return []
return ids
def sort_name(actors):
names = []
for i in actors:
names.append(str(i.name))
names.sort()
return names
def sort_actor_id(actors, names):
ids = []
for i in names:
try:
actor_id = ActorDB.objects.get(name=i).id
ids.append((i, actor_id))
except ActorDB.DoesNotExist:
return []
return ids
| from django.contrib.auth.models import User
from rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB
def user_present(username):
if User.objects.filter(username=username).count():
return True
return False
#Takes in a list of MovieDB/TVDB objects
#Outputs a list of sorted titles
def sort_title(movies):
titles = []
for i in movies:
titles.append(str(i.title))
titles.sort()
return titles
#Takes a list of MovieDB objects and their titles as Strings
#Output a list of tuples containing the (title, id)
def sort_id(movies, titles):
ids = []
for i in titles:
try:
movie_id = MovieDB.objects.get(title=i).id
ids.append((i, movie_id))
except MovieDB.DoesNotExist:
return []
return ids
def sort_tv_id(tvs, titles):
ids = []
for i in titles:
try:
tv_id = TVDB.objects.get(title=i).id
ids.append((i, tv_id))
except TVDB.DoesNotExist:
return []
return ids
def sort_name(actors):
names = []
for i in actors:
names.append(str(i.name))
names.sort()
return names
def sort_actor_id(actors, names):
ids = []
for i in names:
try:
actor_id = ActorDB.objects.get(name=i).id
ids.append((i, actor_id))
except ActorDB.DoesNotExist:
return []
return ids
| [
3,
5,
6,
7,
8
] |
1,214 | f2e2ebd5b848cf3a01b7304e5e194beb3eec1c10 | <mask token>
| <mask token>
def classFactory(iface):
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
| # -*- coding: utf-8 -*-
"""
/***************************************************************************
TileMapScalePlugin
A QGIS plugin
Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.
-------------------
begin : 2014-03-03
copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load TileMapScalePlugin class from file TileMapScalePlugin
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
| null | null | [
0,
1,
2
] |
1,215 | 0deec9058c6f7b77ba4fa3bfc0269c8596ce9612 | <mask token>
| '''
quarter = 0.25
dime = 0.10
nickel = 0.05
penny = 0.01
'''
#def poschg(dollar_amount,number):
| null | null | null | [
0,
1
] |
1,216 | eb9135c6bcf89a62534cfc8480e5d44a089fe5a8 | <mask token>
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary bc MTC-ANN indexing doesn't count tied notes as separate notes
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
<mask token>
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
<mask token>
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
<mask token>
| <mask token>
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary bc MTC-ANN indexing doesn't count tied notes as separate notes
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
def getFeaturesForClasses(patternClass, occs, songs):
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in
patternClass.occNames]
vec['avg_' + fk] = np.mean(allOccVals)
vec['std_' + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
) / vec['numOccs']
vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
'numOccs'])]
vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
pClasses, pOccs):
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs) / 2)
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
tarNumNotes) ** 2
candidateNameList.append([candScore, gcn])
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| <mask token>
ROUND_DURS_DIGITS = 5
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary bc MTC-ANN indexing doesn't count tied notes as separate notes
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
def getFeaturesForClasses(patternClass, occs, songs):
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in
patternClass.occNames]
vec['avg_' + fk] = np.mean(allOccVals)
vec['std_' + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
) / vec['numOccs']
vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
'numOccs'])]
vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
pClasses, pOccs):
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs) / 2)
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
tarNumNotes) ** 2
candidateNameList.append([candScore, gcn])
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| <mask token>
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
ROUND_DURS_DIGITS = 5
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary bc MTC-ANN indexing doesn't count tied notes as separate notes
"""
numNotes = inEnd - inStart + 1
allNotes = songs[songName].score.flat.notes.stream()
if useTies:
beforeSlice = allNotes[:inStart - 1]
numTies = 0
for n in beforeSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
inStart += numTies
numTies = 0
inSlice = allNotes[inStart:inStart + numNotes]
for n in inSlice:
if n.tie is not None:
if n.tie.type == 'start':
numTies += 1
numNotes += numTies
pattOcc = allNotes[inStart:inStart + numNotes]
return pattOcc
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
noteNums = [x.pitch.midi for x in mel]
intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(
noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest - lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in
range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(
noteNums) - 1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
if all([(np.sign(x) == 1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([(np.sign(x) == -1) for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]
for n in range(-3, 3):
num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
yCoords = [(y - noteNums[0]) for y in noteNums]
xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]
xCoords = [(x - xtemp[0]) for x in xtemp]
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
zeros = [(0) for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = ['interval_mean', 'rhythm_variability',
'rhythm_density', 'interval_signs', 'pitch_mean',
'interval_prop_small', 'interval_prop_large']
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0
vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n -
1]) for n in range(1, len(noteBeats))]) > 0
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])
except m21.Music21ObjectException:
pass
return vec
def getFeaturesForClasses(patternClass, occs, songs):
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in
patternClass.occNames]
vec['avg_' + fk] = np.mean(allOccVals)
vec['std_' + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in
mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)
) / vec['numOccs']
vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)
) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[
'numOccs'])]
vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)
) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,
pClasses, pOccs):
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs) / 2)
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -
tarNumNotes) ** 2
candidateNameList.append([candScore, gcn])
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
def inspectFeature(featureName, table, tableNames, featsType='classFeatures'):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| # -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of theassociated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurrence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices tabarnak
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
"""
given song name, occurrence start, occurrence end, and the database of score files,
return the notes of the associated pattern occurrence
useTies is a boolean determining whether or not tied notes count as
two notes or one for the purpose of indexing (true for 1, false for 2)
    necessary because MTC-ANN indexing doesn't count tied notes, while music21 does
"""
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
numNotes = inEnd - inStart + 1 # including endpoints
# add number of ties before start index from start index; meertens
# DOESN'T count tied notes as notes but music21 DOES
allNotes = songs[songName].score.flat.notes.stream()
    # subtract 1 here to get the first note of the occurrence in the slice
# so that we can get rid of it if it's a rest
if(useTies):
beforeSlice = allNotes[:inStart-1]
numTies = 0
for n in beforeSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
inStart += numTies
# do the same for ties inside of the snippet, but also keep track of where
# they are and save that information with the pattOcc so we don't have to go
# through this procedure again (TODO)
numTies = 0
inSlice = allNotes[inStart:(inStart+numNotes)]
for n in inSlice:
if(n.tie is not None):
if(n.tie.type == 'start'):
numTies += 1
# this new numNotes will work with music21
numNotes += numTies
pattOcc = allNotes[inStart:(inStart+numNotes)]
return pattOcc
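# Illustrative sketch (added for clarity, not from the original file; the note
# names and indices below are hypothetical). Suppose the flattened stream is
# [A4, B4 (tie start), B4 (tie stop), C5]. MTC-ANN counts the tied B4 pair once,
# so its index for C5 is 2; music21 keeps both notated notes, so the slice index
# of C5 is 3. Each 'start' tie found before the requested start therefore bumps
# inStart by one, exactly as computed above.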
def getFeaturesForSongs(score):
vec = {}
mel = score.flat.notes.stream()
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
couInt = dict(Counter(intervals))
for k in couInt.keys():
couInt[k] /= len(intervals)
vec['interval_probs'] = couInt
vec['pitch_mean'] = np.mean(noteNums)
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
couRtm = dict(Counter(noteDurs))
for k in couRtm.keys():
couRtm[k] /= len(noteDurs)
vec['duration_probs'] = couRtm
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
# HISTOGRAMS:
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
return vec
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
max_length_occ = 10
vec = {}
mel = cur_class.score
# for now just remove rests
noteNums = [x.pitch.midi for x in mel]
intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
highest = max(noteNums)
lowest = min(noteNums)
vec['numNotes'] = len(noteNums)
vec['pitch_highest'] = highest
vec['pitch_lowest'] = lowest
vec['pitch_range'] = highest-lowest
vec['pitch_num_classes'] = len(set(noteNums))
vec['pitch_mean'] = np.mean(noteNums)
vec['pitch_std'] = np.std(noteNums)
vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
# pitch counting
for n in range(12):
num = len([x for x in noteNums if abs(x) % 12 == n])
vec['pitch_class_count_' + str(n)] = num / len(noteNums)
vec['interval_max'] = max(np.abs(intervals))
vec['interval_min'] = min(np.abs(intervals))
vec['interval_largest_asc'] = max([max(intervals), 0])
vec['interval_largest_desc'] = min([min(intervals), 0])
vec['interval_mean'] = np.mean(np.abs(intervals))
vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
# interval counting
for n in range(13):
num = len([x for x in intervals if abs(x) == n])
vec['interval_count_' + str(n)] = num / len(intervals)
# -1 if monotonically down, 1 if up, else 0
if all([np.sign(x) == 1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = 1
elif all([np.sign(x) == -1 for x in intervals]):
vec['interval_strict_asc_or_desc'] = -1
else:
vec['interval_strict_asc_or_desc'] = 0
# rhythmic properties
noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
vec['rhythm_duration'] = sum(noteDurs)
vec['rhythm_longest_note'] = max(noteDurs)
vec['rhythm_shortest_note'] = min(noteDurs)
vec['rhythm_density'] = np.mean(noteDurs)
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014
vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
# rhythm counting
for n in range(-3, 3):
num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
# POLYFIT IDEA
yCoords = [y - noteNums[0] for y in noteNums]
xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
xCoords = [x - xtemp[0] for x in xtemp]
# print(str(xCoords) + " vs " + str(yCoords))
polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
vec['polyfit_1'] = polyFit1[0][0]
vec['polyfit_residual_1'] = 0
if polyFit1[1].size > 0:
vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
vec['polyfit_2'] = 0
vec['polyfit_residual_2'] = 0
vec['polyfit_3'] = 0
vec['polyfit_residual_3'] = 0
if len(noteNums) >= 3:
polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
vec['polyfit_2'] = polyFit2[0][0]
if polyFit2[1].size > 0:
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
if len(noteNums) >= 4:
polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
vec['polyfit_3'] = polyFit3[0][0]
if polyFit3[1].size > 0:
vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
# add sequence representation of occurrence
zeros = [0 for i in range(max_length_occ)]
for i in range(max_length_occ):
vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
# differences between song and this motif
songVec = songs[cur_class.songName].songFeatures
song_diff_keys = [
'interval_mean',
'rhythm_variability',
'rhythm_density',
'interval_signs',
'pitch_mean',
'interval_prop_small',
'interval_prop_large'
]
song_diff_keys += [x for x in vec.keys() if '_count' in x]
for key in song_diff_keys:
vec['diff_' + key] = songVec[key] - vec[key]
# songScore = songs[motif['songName']]['score'].flat.notes.stream()
# songScoreNums = [x.pitch.midi for x in songScore]
# vec['intervalFollowing'] = 0
# if motif['endInd'] + 1 < len(songScoreNums):
# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]
# vec['intervalPreceding'] = 0
# if motif['endInd'] - 1 > 0:
# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]
sumIntProbs = 1
for i in intervals:
sumIntProbs *= songVec['interval_probs'][i]
vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
sumDurProbs = 1
for d in noteDurs:
sumDurProbs *= songVec['duration_probs'][d]
vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
vec['rhythm_starts_on_downbeat'] = 0
vec['rhythm_crosses_measure'] = 0
vec['rhythm_start_beat_str'] = 0
vec['rhythm_last_beat_str'] = 0
try:
noteBeats = [x.beat for x in mel]
vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
# figure out how to tell if note has associated time signature
noteStr = [x.beatStrength for x in mel]
vec['rhythm_start_beat_str'] = np.log(noteStr[0])
vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
except m21.Music21ObjectException:
# this is not a good solution.
pass
# send it back
return vec
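# Worked micro-example for the expected-occurrence features above (illustrative
# only; the probabilities are hypothetical, not taken from any real song). If a
# song's interval_probs were {2: 0.5, -2: 0.25} and an occurrence had intervals
# [2, -2, 2], then:
# sumIntProbs = 0.5 * 0.25 * 0.5 = 0.0625
# vec['interval_log_expected_occurrences'] = np.log(0.0625)  # about -2.77
# rhythm_log_expected_occurrences is computed the same way from duration_probs.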
def getFeaturesForClasses(patternClass, occs, songs):
# take the average/std over all occurrences
vec = {}
vec['numOccs'] = len(patternClass.occNames)
occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()
for fk in occFeatureKeys:
allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]
vec["avg_" + fk] = np.mean(allOccVals)
vec["std_" + fk] = np.std(allOccVals)
scores = [occs[oc].score.flat for oc in patternClass.occNames]
noteNums = [[x.pitch.midi for x in mel] for mel in scores]
noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS)
for x in mel] for mel in scores]
flatNums = [x for subList in noteNums for x in subList]
vec['num_notes_total'] = len(flatNums)
vec['unique_pitch_prop_content'] = \
len(set(tuple(x) for x in noteNums)) / vec['numOccs']
vec['unique_rhythm_prop_content'] = \
len(set(tuple(x) for x in noteDurs)) / vec['numOccs']
pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]
vec['prop_unique_content'] = \
len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']
return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
# so: we want to take a sample of our huge number of generated pattern classes
# such that the number of occurrences and average cardinality doesn't easily
# distinguish our sample from the annotated group.
# perform a quick and dirty knn to get a bunch of generated class names
# whose cardinalities and numOccs somewhat match the annotated data.
indexPairs = np.arange(len(annPClassNames))
indexPairs = np.concatenate([indexPairs, indexPairs])
np.random.shuffle(indexPairs)
indexPairs = np.split(indexPairs, len(indexPairs)/2)
# deep copy!
genPClassNamesCopy = list(genPClassNames)
filtGenPClassNames = []
for i in range(len(annPClassNames)):
tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
tarNumOccs = len(tar1.occNames)
tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
tarNumNotes = np.mean(tar2Notes)
candidateNameList = []
# calculate how close each generated class is to these parameters
for gcn in genPClassNamesCopy:
cand = pClasses[gcn]
candNumOccs = len(cand.occNames)
candNotes = [len(pOccs[on].score) for on in cand.occNames]
candNumNotes = np.mean(candNotes)
candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
candidateNameList.append([candScore, gcn])
# from the kNearest closest generated classes, choose one and remove
# that one from the copy array
candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
chop = candidateNameList[0:kNearest]
choice = chop[np.random.choice(kNearest)][1]
filtGenPClassNames.append(choice)
genPClassNamesCopy.remove(choice)
return filtGenPClassNames
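# Hedged usage sketch (added for illustration; the argument values are
# assumptions, while the parameter names come from the signature above):
# filtGen = filterPClassesWithKNN(annPClassNames, genPClassNames, 5, pClasses, pOccs)
# filtGen then holds one generated class name per annotated class, each drawn at
# random from the 5 generated classes closest in (number of occurrences, mean
# cardinality) space, so the generated sample is harder to separate from the
# annotated one on those two counts alone.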
def split_into_chunks(inp, num_chunks):
chunk_len = int(np.floor(len(inp) / num_chunks))
chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]
if len(chunks) > num_chunks:
for i, x in enumerate(chunks[num_chunks]):
chunks[i].append(x)
del chunks[num_chunks]
return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
ret = []
for tn in tableNames:
item = table[tn]
ret.append(item[featsType][featureName])
return ret
def scatterFeatures(fn1, fn2, table, tableNames):
xs = []
ys = []
types = []
for tn in tableNames:
item = table[tn]
xs.append(item.classFeatures[fn1])
ys.append(item.classFeatures[fn2])
if item['type'] == 'ann':
types.append('r')
else:
types.append('k')
print(types)
plt.scatter(xs, ys, c=types)
plt.xlabel(fn1)
plt.ylabel(fn2)
plt.show()
return
| [
3,
8,
9,
10,
11
] |
1,217 | 918653cdeea8d91921f8b96779fcd3ebce491948 | #!/usr/bin/env python
class Problem1(object):
def sum_below(self, threshold):
current_number = 1
total = 0
while current_number < threshold:
if (current_number % 3 == 0) or (current_number % 5 == 0):
total += current_number
current_number += 1
return total
if __name__ == '__main__':
problem1 = Problem1()
print problem1.sum_below(1000) # == 233168 | null | null | null | null | [
0
] |
1,218 | 2acfd0bbad68bb9d55aeb39b180f4326a225f6d5 | <mask token>
| <mask token>
print(' Coefficients:')
print(' Coefficient of a = ', a)
print(' Coefficient of b = ', b)
print(' Coefficient of c = ', c)
<mask token>
print('The roots of the equation:')
print(' Root 1 =', root_1)
print(' Root 2 =', root_2)
| <mask token>
a = float(input('Enter the coefficient a: '))
b = float(input('Enter the coefficient b: '))
c = float(input('Enter the coefficient c: '))
print(' Coefficients:')
print(' Coefficient of a = ', a)
print(' Coefficient of b = ', b)
print(' Coefficient of c = ', c)
root_1 = (-b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
root_2 = (-b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
print('The roots of the equation:')
print(' Root 1 =', root_1)
print(' Root 2 =', root_2)
| <mask token>
import math
a = float(input('Enter the coefficient a: '))
b = float(input('Enter the coefficient b: '))
c = float(input('Enter the coefficient c: '))
print(' Coefficients:')
print(' Coefficient of a = ', a)
print(' Coefficient of b = ', b)
print(' Coefficient of c = ', c)
root_1 = (-b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
root_2 = (-b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)
print('The roots of the equation:')
print(' Root 1 =', root_1)
print(' Root 2 =', root_2)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 31 14:35:49 2019
@author: devinpowers
"""
# Lab 1 in CSE 231
#Quadratic Formula
# Find the roots in the Quadratic Formula
import math
a = float(input("Enter the coeddicient a: "))
b = float(input("Enter the coeddicient b: "))
c = float(input("Enter the coeddicient c: "))
print (" Coefficients:")
print( " Coefficient of a = ", a)
print( " Coefficient of b = ", b)
print( " Coefficient of c = ", c)
root_1 = (-b+(b**2-4*a*c)**(0.5))/(2*a)
root_2 = (-b-(b**2-4*a*c)**(0.5))/(2*a)
print("The roots of the equation:")
print( " Root 1 =", root_1)
print( " Root 2 =", root_2)
| [
0,
1,
2,
3,
4
] |
1,219 | 0276181055f2c70562c1f557a16d00ba7107d003 |
import pyximport
pyximport.install(build_in_temp=False,inplace=True)
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
import numpy as np
from test1 import c_test,c_test_result_workaround
a = np.ascontiguousarray(np.array([ [1,2,3],[1,2,3],[1,2,3] ], dtype=np.long), dtype=np.long)
print '\nStart Value:\n',a
a_transposed = a.T
ai = a_transposed[0]
i = ai[0]
j = ai[1]
k = ai[2]
print '\nExpected Value:\n',[i,j,k]
b = np.ascontiguousarray(np.empty((3,), dtype=np.long,order='C'))
x = c_test(a,b)
print '\nProblem Result:\n',np.asarray(x)
y = c_test_result_workaround(a,b)
print '\nWork-Around Result:\n',np.asarray(y) | null | null | null | null | [
0
] |
1,220 | 472a79767f5dc7dc3cd03d89999d322b3885dcbf | <mask token>
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
| <mask token>
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
| <mask token>
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
| from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from status.api.serializers import StatusInlineUserSerializer
from status.api.views import StatusAPIView
from status.models import Status
from .serializers import UserDetailSerializer
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
| from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from status.api.serializers import StatusInlineUserSerializer
from status.api.views import StatusAPIView
from status.models import Status
from .serializers import UserDetailSerializer
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = ('id',)
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get("username")
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({"detail": "Not allowed here"})
| [
4,
6,
7,
8,
9
] |
1,221 | 6109efeb3462ac2c5a94a68fbfa4f2f0617dd927 | <mask token>
| <mask token>
def extractLetters(sourceFolder, trainRatio=0.8, destFolder=
'./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
capImages = os.listdir(sourceFolder)
nImages = len(capImages)
iSplit = int(nImages * trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]
with open('trainTestSplit.dat', 'wb') as f:
pickle.dump(trainTestSplit, f)
nTrain = len(trainTestSplit[0])
for iImage, capImage in enumerate(trainTestSplit[0]):
print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))
capLabel = capImage.split('.')[0]
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.
BORDER_REPLICATE)
letterRegions = locateLetterRegions(imageData)
if len(letterRegions) != len(capLabel):
continue
for letterRegion, letterLabel in zip(letterRegions, capLabel):
x, y, w, h = letterRegion
letterImage = imageData[y:y + h, x:x + w]
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
fileName = os.path.join(savePath, '{}.png'.format(str(
letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
| <mask token>
import os
import cv2
import pickle
from utils import locateLetterRegions
def extractLetters(sourceFolder, trainRatio=0.8, destFolder=
'./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
capImages = os.listdir(sourceFolder)
nImages = len(capImages)
iSplit = int(nImages * trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]]
with open('trainTestSplit.dat', 'wb') as f:
pickle.dump(trainTestSplit, f)
nTrain = len(trainTestSplit[0])
for iImage, capImage in enumerate(trainTestSplit[0]):
print('Processing image ' + str(iImage + 1) + ' of ' + str(nTrain))
capLabel = capImage.split('.')[0]
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.
BORDER_REPLICATE)
letterRegions = locateLetterRegions(imageData)
if len(letterRegions) != len(capLabel):
continue
for letterRegion, letterLabel in zip(letterRegions, capLabel):
x, y, w, h = letterRegion
letterImage = imageData[y:y + h, x:x + w]
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
fileName = os.path.join(savePath, '{}.png'.format(str(
letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 19 09:31:20 2021
@author: dclabby
"""
import os
import cv2
import pickle
from utils import locateLetterRegions
# # Constants
# sourceFolder = '/home/dclabby/Documents/Springboard/HDAIML_SEP/Semester03/MachineLearning/Project/solving_captchas_code_examples/solving_captchas_code_examples/generated_captcha_images/'
# destFolder = './data/separateLetters'
# trainRatio = 0.8 # proportion of data set that will be used for training & validation (i.e. 1 - testRatio)
def extractLetters(sourceFolder, trainRatio=0.8, destFolder='./data/separateLetters'):
"""
Parameters
----------
sourceFolder : string
DESCRIPTION.
trainRatio : float, optional
DESCRIPTION. The default is 0.8.
destFolder : string, optional
DESCRIPTION. The default is './data/separateLetters'.
Returns
-------
None.
"""
letterCounts = {}
# Get a list of all the captcha images to be processed
capImages = os.listdir(sourceFolder)
# loop over the image paths
nImages = len(capImages)
# note: the original script uses all images for training (train/test split is implemented later, but test data is actually used for validation)
# therefore, should make a train/test split here & keep the test data separate
iSplit = int(nImages*trainRatio)
trainTestSplit = [capImages[:iSplit], capImages[iSplit:]] # [train, test]
# save the list of training and test data, so that test data can be identified later
with open('trainTestSplit.dat', "wb") as f:
pickle.dump(trainTestSplit, f)
# with open('trainTestSplit.dat', "rb") as f:
# trainTestSplit = pickle.load(f)
nTrain = len(trainTestSplit[0])
for (iImage, capImage) in enumerate(trainTestSplit[0]):#enumerate(capImages):
print('Processing image ' + str(iImage+1) + ' of ' + str(nTrain))#str(nImages))
# Separate the filename from its extension, and use filename as the captcha's label (i.e. "2A2X.png" -> "2A2X")
capLabel = capImage.split('.')[0]
# Load image
# imageData = cv2.imread(sourceFolder + capImage)
imageData = cv2.imread(os.path.join(sourceFolder, capImage))
#cv2.imshow(capLabel + ' - original', imageData)
# Convert to grayscale
imageData = cv2.cvtColor(imageData, cv2.COLOR_BGR2GRAY)
#cv2.imshow(capLabel + ' - gray', imageData)
# Add padding
imageData = cv2.copyMakeBorder(imageData, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
#cv2.imshow(capLabel + ' - padding', imageData)
# Locate letter regions
letterRegions = locateLetterRegions(imageData)
# If the number of contours does not equal the number of letters in the label it is concluded that letter extraction
# was not successful, and this example will not be used in training data
if len(letterRegions) != len(capLabel):
continue
# Save each letter as a separate image
for letterRegion, letterLabel in zip(letterRegions, capLabel):
# Get coordinates (x, y) and dimensions (w, h) of letter region
x, y, w, h = letterRegion
# extract the letter from the original image
letterImage = imageData[y:y + h, x:x + w]
# # extract the letter from the original image, with a 2 pixel margin
# letterImage = imageData[y - 2:y + h + 2, x - 2:x + w + 2] # note: image data arranged with rows corresponding to the vertical (y), & columns corresponding to the horizontal (x)
#cv2.imshow(letterLabel, letterImage)
# define folder path where letters will be saved & create folder if it does not exist
savePath = os.path.join(destFolder, letterLabel)
if not os.path.exists(savePath):
os.makedirs(savePath)
# initialize or increment the letterCounts dictionary for the key corresponding to the present letter
if letterLabel not in letterCounts:
letterCounts[letterLabel] = 1
else:
letterCounts[letterLabel] += 1
letterCount = letterCounts[letterLabel]
# write the letter image to a file based on its letter count
fileName = os.path.join(savePath, "{}.png".format(str(letterCount).zfill(6)))
cv2.imwrite(fileName, letterImage)
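# Hedged usage sketch (added for illustration; the source folder path is an
# assumption, not taken from this file):
# extractLetters('./data/captchaImages', trainRatio=0.8,
#                destFolder='./data/separateLetters')
# This pickles the train/test filename split to trainTestSplit.dat, then, for each
# training captcha whose contour count matches its label length, writes one PNG
# per segmented letter to destFolder/<letter>/<count>.png.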
| null | [
0,
1,
2,
3
] |
1,222 | 2a92c47231b75a441660fed80a9bce9a35695af5 | <mask token>
| <mask token>
def calc(x):
return str(math.log(abs(12 * math.sin(int(x)))))
<mask token>
| <mask token>
def calc(x):
return str(math.log(abs(12 * math.sin(int(x)))))
try:
br = webdriver.Chrome()
lk = 'http://suninjuly.github.io/get_attribute.html'
br.get(lk)
treasure = br.find_element_by_id('treasure')
valuex = treasure.get_attribute('valuex')
radio_button = br.find_element_by_id('robotsRule')
check_box = br.find_element_by_id('robotCheckbox')
input_text = br.find_element_by_id('answer')
button = br.find_element_by_css_selector('button.btn')
answer = calc(valuex)
check_box.click()
radio_button.click()
input_text.send_keys(answer)
button.click()
finally:
time.sleep(10)
br.quit()
| from selenium import webdriver
import time
import math
def calc(x):
return str(math.log(abs(12 * math.sin(int(x)))))
try:
br = webdriver.Chrome()
lk = 'http://suninjuly.github.io/get_attribute.html'
br.get(lk)
treasure = br.find_element_by_id('treasure')
valuex = treasure.get_attribute('valuex')
radio_button = br.find_element_by_id('robotsRule')
check_box = br.find_element_by_id('robotCheckbox')
input_text = br.find_element_by_id('answer')
button = br.find_element_by_css_selector('button.btn')
answer = calc(valuex)
check_box.click()
radio_button.click()
input_text.send_keys(answer)
button.click()
finally:
time.sleep(10)
br.quit()
| from selenium import webdriver
import time
import math
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
try:
br = webdriver.Chrome();
lk = 'http://suninjuly.github.io/get_attribute.html'
br.get(lk)
    # collect
treasure=br.find_element_by_id('treasure')
valuex = treasure.get_attribute('valuex')
radio_button = br.find_element_by_id('robotsRule')
check_box = br.find_element_by_id('robotCheckbox')
input_text = br.find_element_by_id('answer')
button = br.find_element_by_css_selector('button.btn')
    # distribute
answer = calc(valuex)
check_box.click()
radio_button.click()
input_text.send_keys(answer)
button.click()
finally:
time.sleep(10)
br.quit() | [
0,
1,
2,
3,
4
] |
1,223 | 92eaceb46974ba3a5944300139d5929d44673181 | <mask token>
class GPTD_fixedGrid:
def __init__(self, env, sigma0, gamma, kernel, D, V_mu=[]):
self.env = env
self.gamma = gamma
self.sigma0 = sigma0
self.kernel = kernel.kernel
if not V_mu:
V_mu = lambda s: np.zeros((s.shape[1], 1))
self.V_mu = V_mu
self.V_D = self.V_mu(D)
self.D = D
self.A = np.zeros((self.D.shape[1], 1), dtype=np.float64, order='C')
self.A[-1, 0] = 1
K = self.kernel(self.D, self.D)
self.K_inv = np.linalg.inv(K)
self.alpha_ = np.zeros((self.D.shape[1], 1), dtype=np.float64,
order='C')
self.C_ = np.zeros((self.D.shape[1], self.D.shape[1]), dtype=np.
float64, order='C')
self.diff_alpha_CV_D = np.empty((self.D.shape[1], 1), dtype=np.
float64, order='C')
def k_(self, x):
if len(x.shape) == 1:
x = x[:, np.newaxis]
assert len(x.shape) == 2, 'Check state dimensions'
return self.kernel(self.D, np.repeat(x, self.D.shape[1], axis=1))
<mask token>
<mask token>
<mask token>
| <mask token>
class GPTD_fixedGrid:
def __init__(self, env, sigma0, gamma, kernel, D, V_mu=[]):
self.env = env
self.gamma = gamma
self.sigma0 = sigma0
self.kernel = kernel.kernel
if not V_mu:
V_mu = lambda s: np.zeros((s.shape[1], 1))
self.V_mu = V_mu
self.V_D = self.V_mu(D)
self.D = D
self.A = np.zeros((self.D.shape[1], 1), dtype=np.float64, order='C')
self.A[-1, 0] = 1
K = self.kernel(self.D, self.D)
self.K_inv = np.linalg.inv(K)
self.alpha_ = np.zeros((self.D.shape[1], 1), dtype=np.float64,
order='C')
self.C_ = np.zeros((self.D.shape[1], self.D.shape[1]), dtype=np.
float64, order='C')
self.diff_alpha_CV_D = np.empty((self.D.shape[1], 1), dtype=np.
float64, order='C')
def k_(self, x):
if len(x.shape) == 1:
x = x[:, np.newaxis]
assert len(x.shape) == 2, 'Check state dimensions'
return self.kernel(self.D, np.repeat(x, self.D.shape[1], axis=1))
def update(self, state_sequence, reward_sequence):
"""
Update GP after observing states (state_sequence) and rewards (reward_sequence)
"""
for i in range(reward_sequence.shape[0]):
trajt_1 = state_sequence[:, i][:, np.newaxis]
trajt = state_sequence[:, i + 1][:, np.newaxis]
k_t_1 = self.kernel(self.D, trajt_1)
k_t = self.kernel(self.D, trajt)
ktt = self.kernel(trajt, trajt)
at = np.dot(self.K_inv, k_t)
delk_t_1 = k_t_1 - self.gamma * k_t
ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma * at)
st = self.sigma0 ** 2 - np.dot(ct.T, delk_t_1)
diff_r = np.dot(delk_t_1.T, self.alpha_)[0, 0] - reward_sequence[i]
self.alpha_ = self.alpha_ + ct / st * diff_r
self.C_ = self.C_ + np.dot(ct, ct.T) / st
self.A = at
assert not np.isnan(self.alpha_).any(
), 'Check alpha for NaN values'
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
<mask token>
def get_value_function(self, states):
if self.D.shape[1] == 0:
return self.V_mu(states)
else:
return self.V_mu(states) + np.dot(self.kernel(self.D, states).T,
self.diff_alpha_CV_D)
| <mask token>
class GPTD_fixedGrid:
def __init__(self, env, sigma0, gamma, kernel, D, V_mu=[]):
self.env = env
self.gamma = gamma
self.sigma0 = sigma0
self.kernel = kernel.kernel
if not V_mu:
V_mu = lambda s: np.zeros((s.shape[1], 1))
self.V_mu = V_mu
self.V_D = self.V_mu(D)
self.D = D
self.A = np.zeros((self.D.shape[1], 1), dtype=np.float64, order='C')
self.A[-1, 0] = 1
K = self.kernel(self.D, self.D)
self.K_inv = np.linalg.inv(K)
self.alpha_ = np.zeros((self.D.shape[1], 1), dtype=np.float64,
order='C')
self.C_ = np.zeros((self.D.shape[1], self.D.shape[1]), dtype=np.
float64, order='C')
self.diff_alpha_CV_D = np.empty((self.D.shape[1], 1), dtype=np.
float64, order='C')
def k_(self, x):
if len(x.shape) == 1:
x = x[:, np.newaxis]
assert len(x.shape) == 2, 'Check state dimensions'
return self.kernel(self.D, np.repeat(x, self.D.shape[1], axis=1))
def update(self, state_sequence, reward_sequence):
"""
Update GP after observing states (state_sequence) and rewards (reward_sequence)
"""
for i in range(reward_sequence.shape[0]):
trajt_1 = state_sequence[:, i][:, np.newaxis]
trajt = state_sequence[:, i + 1][:, np.newaxis]
k_t_1 = self.kernel(self.D, trajt_1)
k_t = self.kernel(self.D, trajt)
ktt = self.kernel(trajt, trajt)
at = np.dot(self.K_inv, k_t)
delk_t_1 = k_t_1 - self.gamma * k_t
ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma * at)
st = self.sigma0 ** 2 - np.dot(ct.T, delk_t_1)
diff_r = np.dot(delk_t_1.T, self.alpha_)[0, 0] - reward_sequence[i]
self.alpha_ = self.alpha_ + ct / st * diff_r
self.C_ = self.C_ + np.dot(ct, ct.T) / st
self.A = at
assert not np.isnan(self.alpha_).any(
), 'Check alpha for NaN values'
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
def build_posterior(self, policy, num_episodes, max_episode_length,
test_every=np.inf, states_V_target=()):
"""
policy is a function that take state as input and returns an action
"""
statistics = trange(num_episodes)
test_error = np.array([])
for e in statistics:
is_terminal = False
num_steps = 0
state = self.env.reset()
action = policy(state)
state_sequence = np.empty((state.shape[0], max_episode_length +
1), dtype=np.float64, order='C')
state_sequence[:, 0] = state[:, 0]
reward_sequence = np.empty(max_episode_length, dtype=np.float64,
order='C')
while num_steps < max_episode_length and not is_terminal:
num_steps += 1
state, reward, is_terminal = self.env.step(action)
action = policy(state)
state_sequence[:, num_steps] = state[:, 0]
reward_sequence[num_steps - 1] = reward
state_sequence = state_sequence[:, 0:num_steps + 1]
reward_sequence = reward_sequence[0:num_steps]
if self.D.shape[1] == 0:
traj = state_sequence[:, 0][:, np.newaxis]
self.D = traj
self.V_D = self.V_mu(state_sequence[:, 0][:, np.newaxis])
self.K_inv = 1 / self.kernel(traj, traj)
self.A = np.array([[1]])
self.alpha_ = np.array([[0]])
self.C_ = np.array([[0]])
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
self.update(state_sequence, reward_sequence)
statistics.set_postfix(epi_length=num_steps, dict_size=self.D.
shape[1], cumm_cost=np.sum(reward_sequence))
if e % test_every == 0 and len(states_V_target) == 2:
V = self.get_value_function(states_V_target[0])
test_error = np.concatenate((test_error, np.array([np.mean(
np.abs(V - states_V_target[1]))])))
return test_error
def get_value_function(self, states):
if self.D.shape[1] == 0:
return self.V_mu(states)
else:
return self.V_mu(states) + np.dot(self.kernel(self.D, states).T,
self.diff_alpha_CV_D)
| from tqdm import trange
import numpy as np
class GPTD_fixedGrid:
def __init__(self, env, sigma0, gamma, kernel, D, V_mu=[]):
self.env = env
self.gamma = gamma
self.sigma0 = sigma0
self.kernel = kernel.kernel
if not V_mu:
V_mu = lambda s: np.zeros((s.shape[1], 1))
self.V_mu = V_mu
self.V_D = self.V_mu(D)
self.D = D
self.A = np.zeros((self.D.shape[1], 1), dtype=np.float64, order='C')
self.A[-1, 0] = 1
K = self.kernel(self.D, self.D)
self.K_inv = np.linalg.inv(K)
self.alpha_ = np.zeros((self.D.shape[1], 1), dtype=np.float64,
order='C')
self.C_ = np.zeros((self.D.shape[1], self.D.shape[1]), dtype=np.
float64, order='C')
self.diff_alpha_CV_D = np.empty((self.D.shape[1], 1), dtype=np.
float64, order='C')
def k_(self, x):
if len(x.shape) == 1:
x = x[:, np.newaxis]
assert len(x.shape) == 2, 'Check state dimensions'
return self.kernel(self.D, np.repeat(x, self.D.shape[1], axis=1))
def update(self, state_sequence, reward_sequence):
"""
Update GP after observing states (state_sequence) and rewards (reward_sequence)
"""
for i in range(reward_sequence.shape[0]):
trajt_1 = state_sequence[:, i][:, np.newaxis]
trajt = state_sequence[:, i + 1][:, np.newaxis]
k_t_1 = self.kernel(self.D, trajt_1)
k_t = self.kernel(self.D, trajt)
ktt = self.kernel(trajt, trajt)
at = np.dot(self.K_inv, k_t)
delk_t_1 = k_t_1 - self.gamma * k_t
ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma * at)
st = self.sigma0 ** 2 - np.dot(ct.T, delk_t_1)
diff_r = np.dot(delk_t_1.T, self.alpha_)[0, 0] - reward_sequence[i]
self.alpha_ = self.alpha_ + ct / st * diff_r
self.C_ = self.C_ + np.dot(ct, ct.T) / st
self.A = at
assert not np.isnan(self.alpha_).any(
), 'Check alpha for NaN values'
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
def build_posterior(self, policy, num_episodes, max_episode_length,
test_every=np.inf, states_V_target=()):
"""
policy is a function that take state as input and returns an action
"""
statistics = trange(num_episodes)
test_error = np.array([])
for e in statistics:
is_terminal = False
num_steps = 0
state = self.env.reset()
action = policy(state)
state_sequence = np.empty((state.shape[0], max_episode_length +
1), dtype=np.float64, order='C')
state_sequence[:, 0] = state[:, 0]
reward_sequence = np.empty(max_episode_length, dtype=np.float64,
order='C')
while num_steps < max_episode_length and not is_terminal:
num_steps += 1
state, reward, is_terminal = self.env.step(action)
action = policy(state)
state_sequence[:, num_steps] = state[:, 0]
reward_sequence[num_steps - 1] = reward
state_sequence = state_sequence[:, 0:num_steps + 1]
reward_sequence = reward_sequence[0:num_steps]
if self.D.shape[1] == 0:
traj = state_sequence[:, 0][:, np.newaxis]
self.D = traj
self.V_D = self.V_mu(state_sequence[:, 0][:, np.newaxis])
self.K_inv = 1 / self.kernel(traj, traj)
self.A = np.array([[1]])
self.alpha_ = np.array([[0]])
self.C_ = np.array([[0]])
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
self.update(state_sequence, reward_sequence)
statistics.set_postfix(epi_length=num_steps, dict_size=self.D.
shape[1], cumm_cost=np.sum(reward_sequence))
if e % test_every == 0 and len(states_V_target) == 2:
V = self.get_value_function(states_V_target[0])
test_error = np.concatenate((test_error, np.array([np.mean(
np.abs(V - states_V_target[1]))])))
return test_error
def get_value_function(self, states):
if self.D.shape[1] == 0:
return self.V_mu(states)
else:
return self.V_mu(states) + np.dot(self.kernel(self.D, states).T,
self.diff_alpha_CV_D)
| from tqdm import trange
import numpy as np
class GPTD_fixedGrid:
def __init__(self, env, sigma0, gamma, kernel, D, V_mu=[]):
self.env = env
self.gamma = gamma
self.sigma0 = sigma0
self.kernel = kernel.kernel
if (not V_mu):
V_mu = lambda s: np.zeros((s.shape[1],1))
self.V_mu = V_mu
self.V_D = self.V_mu(D)
self.D = D
# self.D = np.concatenate((self.D, self.V_D.T), axis=0) # Use V_mu in computing distances!
self.A = np.zeros((self.D.shape[1],1), dtype=np.float64, order='C')
self.A[-1,0] = 1
K = self.kernel(self.D, self.D)
self.K_inv = np.linalg.inv(K)
self.alpha_ = np.zeros((self.D.shape[1],1), dtype=np.float64, order='C')
self.C_ = np.zeros((self.D.shape[1],self.D.shape[1]), dtype=np.float64, order='C')
self.diff_alpha_CV_D = np.empty((self.D.shape[1],1), dtype=np.float64, order='C')
def k_(self,x):
if (len(x.shape)==1):
x = x[:,np.newaxis]
assert len(x.shape)==2, "Check state dimensions"
return self.kernel(self.D, np.repeat(x, self.D.shape[1], axis=1))
def update(self, state_sequence, reward_sequence):
"""
Update GP after observing states (state_sequence) and rewards (reward_sequence)
"""
for i in range(reward_sequence.shape[0]):
trajt_1 = state_sequence[:,i][:,np.newaxis] # No use of V_mu in computing distances!
trajt = state_sequence[:,i+1][:,np.newaxis]
# trajt_1 = np.concatenate((trajt_1, self.V_mu(trajt_1)), axis=0) # Use V_mu as well
# trajt = np.concatenate((trajt, self.V_mu(trajt)), axis=0)
k_t_1 = self.kernel(self.D, trajt_1)
k_t = self.kernel(self.D, trajt)
ktt = self.kernel(trajt, trajt)
at = np.dot(self.K_inv, k_t)
delk_t_1 = k_t_1 - self.gamma*k_t
ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma*at)
st = self.sigma0**2 - np.dot(ct.T, delk_t_1)
diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]
self.alpha_ = self.alpha_ + ct/st*diff_r
self.C_ = self.C_ + np.dot(ct, ct.T)/st
self.A = at
assert (not np.isnan(self.alpha_).any()), "Check alpha for NaN values"
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
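# Reading of the recursion implemented above, written out for clarity (my
# paraphrase of the code, not notes from the original author). With
# delk_t = k(D, s_{t-1}) - gamma * k(D, s_t) and a_t = K_inv * k(D, s_t):
#   c_t   = C * delk_t - (a_{t-1} - gamma * a_t)
#   s_t   = sigma0**2 - c_t.T * delk_t
#   alpha <- alpha + (c_t / s_t) * (delk_t.T * alpha - r_t)
#   C     <- C + (c_t * c_t.T) / s_t
# i.e. a rank-one GPTD update over the fixed dictionary D, with no dictionary growth.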
def build_posterior(self, policy, num_episodes, max_episode_length, test_every=np.inf, states_V_target=()):
"""
policy is a function that take state as input and returns an action
"""
statistics = trange(num_episodes)
test_error = np.array([])
for e in statistics:
is_terminal = False
num_steps = 0
state = self.env.reset()
action = policy(state)
state_sequence = np.empty((state.shape[0], max_episode_length+1), dtype=np.float64, order='C')
state_sequence[:, 0] = state[:,0]
reward_sequence = np.empty(max_episode_length, dtype=np.float64, order='C')
while ((num_steps < max_episode_length) and (not is_terminal)):
num_steps+=1
state, reward, is_terminal = self.env.step(action)
action = policy(state)
state_sequence[:, num_steps] = state[:,0]
reward_sequence[num_steps-1] = reward
state_sequence = state_sequence[:, 0:(num_steps+1)]
reward_sequence = reward_sequence[0:num_steps]
if (self.D.shape[1]==0):
traj = state_sequence[:,0][:,np.newaxis]
self.D = traj
self.V_D = self.V_mu(state_sequence[:,0][:,np.newaxis])
self.K_inv = 1/self.kernel(traj, traj)
self.A = np.array([[1]])
self.alpha_ = np.array([[0]])
self.C_= np.array([[0]])
self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)
self.update(state_sequence, reward_sequence)
statistics.set_postfix(epi_length=num_steps, dict_size=self.D.shape[1], cumm_cost=np.sum(reward_sequence))
if (e%test_every==0 and len(states_V_target)==2):
V = self.get_value_function(states_V_target[0])
test_error = np.concatenate((test_error, np.array([np.mean(np.abs(V - states_V_target[1]))])))
return test_error
def get_value_function(self, states):
if (self.D.shape[1]==0):
return self.V_mu(states)
else:
return self.V_mu(states) + np.dot(self.kernel(self.D, states).T, self.diff_alpha_CV_D) | [
3,
5,
6,
7,
8
] |
1,224 | c926e16ef2daa5978b6c71e7794721d320bb9b1e | <mask token>
| def tetrahedron_filled(tetrahedrons, water):
var = 0
br = 0
tetrahedrons.sort()
for numbers in tetrahedrons:
v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000
if v < water:
br = br + 1
water = water - v
var = var + 1
print(br)
<mask token>
| def tetrahedron_filled(tetrahedrons, water):
var = 0
br = 0
tetrahedrons.sort()
for numbers in tetrahedrons:
v = tetrahedrons[var] ** 3 * 2 ** 0.5 / 12000
if v < water:
br = br + 1
water = water - v
var = var + 1
print(br)
print(tetrahedron_filled([1000, 10], 10))
| def tetrahedron_filled(tetrahedrons, water):
var=0
br=0
tetrahedrons.sort()
for numbers in tetrahedrons:
v=(tetrahedrons[var]**3*(2**0.5))/12000
if v<water:
br=br+1
water=water-v
var=var+1
print (br)
print (tetrahedron_filled([1000,10],10)) | null | [
0,
1,
2,
3
] |
1,225 | c349fa484476e3195e0932e425cbe93d7a7e5394 | <mask token>
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
<mask token>
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
<mask token>
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
<mask token>
| <mask token>
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
<mask token>
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
<mask token>
| <mask token>
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
if __name__ == '__main__':
rospy.init_node('keep_yaw')
movement_nullifier = MovementNullifier()
rospy.spin()
| import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
def __init__(self):
rospy.Subscriber('odom', Odometry, self.OdomCallback)
rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,
queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service('stop_nullify', Empty, self.
StopListening)
self.start_service = rospy.Service('start_nullify', Empty, self.
StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.
linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding:
pose = data.pose
quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,
pose.pose.orientation.z, pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
if self.move and self.distance > self.prev_distance:
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
self.first = True
self.angle = 0.0
if __name__ == '__main__':
rospy.init_node('keep_yaw')
movement_nullifier = MovementNullifier()
rospy.spin()
| #!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
def __init__(self):
rospy.Subscriber("odom", Odometry, self.OdomCallback)
rospy.Subscriber("cmd_vel", Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher("cmd_vel", Twist, queue_size=10)
self.first = True
self.start_yaw = 0
self.threshold = 0.01;
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service("stop_nullify", Empty, self.StopListening)
self.start_service = rospy.Service("start_nullify", Empty, self.StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
#self.Zero()
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
#print "Turning with velocity: %f" % (self.velocity)
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding: # lets counter react movement
pose = data.pose
quaternion = (pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
#print "Yaw: %f deg, Position x: %f" % (degrees(euler[2]), pose.pose.position.x)
#print "Turn: %r, Move: %r, First: %r" % (self.turn, self.move, self.first)
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
#print "Start yaw: %f" % (self.start_yaw)
#print "Start x: %f" % (self.start_x)
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
#print "Distance %f, prev distance: %f" % (self.distance, self.prev_distance)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
#print "Angle: %f" % self.angle
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
#print "Yaw: start %f, new %f" % (self.start_yaw, yaw)
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
#print "Position: start %f, new %f" % (self.start_x, x_position)
if self.move and (self.distance > self.prev_distance):
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
#print 'Resetting...'
self.first = True
self.angle = 0.0
if __name__ == "__main__":
rospy.init_node("keep_yaw")
movement_nullifier = MovementNullifier()
rospy.spin() | [
7,
8,
10,
11,
12
] |
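The MovementNullifier node above cancels residual drift by watching odometry: it converts the pose quaternion to Euler angles, reads euler[2] as yaw, and publishes small counter-velocities until the starting pose is restored. A minimal standalone sketch of that yaw extraction, using plain math instead of ROS (the quaternion values are illustrative):

from math import atan2, degrees

def yaw_from_quaternion(x, y, z, w):
    # rotation about z, the same angle tf.transformations.euler_from_quaternion returns as euler[2]
    return atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

# quaternion for a ~10 degree heading: (0, 0, sin(5 deg), cos(5 deg))
print(round(degrees(yaw_from_quaternion(0.0, 0.0, 0.0872, 0.9962)), 1))  # 10.0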
1,226 | 3ffe16494eb45896563a2952f3bcf80fc19b2750 | <mask token>
| def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
<mask token>
| def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
print(solution(['Enter uid1234 Muzi', 'Enter uid4567 Prodo',
'Leave uid1234', 'Enter uid1234 Prodo', 'Change uid4567 Ryan']))
| def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
print(solution(["Enter uid1234 Muzi", "Enter uid4567 Prodo","Leave uid1234","Enter uid1234 Prodo","Change uid4567 Ryan"])) | null | [
0,
1,
2,
3
] |
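In the chat-log record above, the Korean suffixes '님이 들어왔습니다.' and '님이 나갔습니다.' mean 'has entered.' and 'has left.'. Because the dict keeps only the latest nickname per user id, replaying the sample call by hand gives the four messages below; a small self-check against the solution above:

record = ['Enter uid1234 Muzi', 'Enter uid4567 Prodo', 'Leave uid1234',
          'Enter uid1234 Prodo', 'Change uid4567 Ryan']
expected = ['Prodo님이 들어왔습니다.', 'Ryan님이 들어왔습니다.',
            'Prodo님이 나갔습니다.', 'Prodo님이 들어왔습니다.']
assert solution(record) == expected  # latest nicknames (Prodo, Ryan) label every event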
1,227 | 8c166dd4cb091dcd2d80b5ae3085b5dee77564e0 | <mask token>
class Score(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Score(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __unicode__(self):
return smart_unicode(self.ps)
| <mask token>
class Score(models.Model):
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore = models.IntegerField(null=False, blank=False)
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps)
| from django.db import models
from django.utils.encoding import smart_unicode
class Score(models.Model):
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore = models.IntegerField(null=False, blank=False)
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps)
| from django.db import models
#from publicservants import models
from django.utils.encoding import smart_unicode
# Create your models here.
class Score(models.Model):
#score ID - publicservant ID plus score
#sID = models.ManyToOneRel(field=PublicServant.psID)
#PS Score at time t
pst = models.IntegerField(null=False)
timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
#Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.
#Aggregrate values for period of time
positivePersonality = models.IntegerField(null=False, blank=False)
negativePersonality = models.IntegerField(null=False, blank=False)
positiveReviewMentions = models.IntegerField(null=False, blank=False)
negativeReviewMentions = models.IntegerField(null=False, blank=False)
userScore= models.IntegerField(null=False, blank=False)
#Actual PSScore at 12am everyday
ps = models.IntegerField(null=False)
def __unicode__(self):
return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)
| [
1,
2,
3,
4,
5
] |
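A short usage sketch for the Score model above; the field values are made up, and a configured Django project with migrations applied is assumed (the smart_unicode/__unicode__ style marks this as Python 2 era code):

score = Score.objects.create(
    pst=72, positivePersonality=10, negativePersonality=2,
    positiveReviewMentions=25, negativeReviewMentions=5,
    userScore=80, ps=75)   # timestamp fills itself via auto_now
print(unicode(score))      # __unicode__ renders the ps value: 75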
1,228 | 9d3db4ca5bf964c68e9778a3625c842e74bf9dbd | <mask token>
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
<mask token>
| <mask token>
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
| <mask token>
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
| import os
import z5py
from shutil import copytree, copyfile
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(os.path.join(ref_path, 'attributes.json'), os.path.join(
out_path, 'attributes.json'))
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(os.path.join(in_path, in_key), os.path.join(out_path, out_key))
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print('Copying', name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images',
'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo)
)
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.
chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=
ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
| import os
import z5py
from shutil import copytree, copyfile
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
SCRATCH = '/scratch/pape/mito_em/data'
def create_file(out_path, ref_path):
os.makedirs(out_path, exist_ok=True)
copyfile(
os.path.join(ref_path, 'attributes.json'),
os.path.join(out_path, 'attributes.json')
)
def copy_to_scratch(in_path, out_path, out_key):
if out_key in z5py.File(out_path, 'r'):
return
in_key = 'setup0/timepoint0/s0'
copytree(
os.path.join(in_path, in_key),
os.path.join(out_path, out_key)
)
# copy training, test and val data to scratch
def prepare_scratch():
os.makedirs(SCRATCH, exist_ok=True)
for name in ('rat', 'human'):
for split in ('train', 'val', 'test'):
print("Copying", name, split)
out_path = os.path.join(SCRATCH, f'{name}_{split}.n5')
raw_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-raw.n5')
create_file(out_path, raw_path)
copy_to_scratch(raw_path, out_path, 'raw')
label_path = os.path.join(ROOT, f'{name}_{split}', 'images', 'local', 'em-mitos.n5')
if os.path.exists(label_path):
copy_to_scratch(label_path, out_path, 'labels')
def make_small_volume():
in_path = './data/human_train.n5'
f = z5py.File(in_path, 'r')
ds_r = f['raw']
ds_l = f['labels']
halo = [32, 256, 256]
shape = ds_r.shape
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))
raw = ds_r[bb]
labels = ds_l[bb]
out_path = './data/small.n5'
with z5py.File(out_path, 'a') as f:
f.create_dataset('raw', data=raw, compression='gzip', chunks=ds_r.chunks)
f.create_dataset('labels', data=labels, compression='gzip', chunks=ds_l.chunks)
if __name__ == '__main__':
prepare_scratch()
# make_small_volume()
| [
3,
5,
6,
7,
8
] |
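make_small_volume above cuts a centered crop whose half-width per axis is given by halo. The same bounding-box arithmetic in isolation, with an illustrative volume shape and no z5py dependency:

shape = (100, 1024, 1024)    # illustrative z, y, x shape
halo = [32, 256, 256]
bb = tuple(slice(sh // 2 - ha, sh // 2 + ha) for sh, ha in zip(shape, halo))
print(bb)                              # (slice(18, 82), slice(256, 768), slice(256, 768))
print([s.stop - s.start for s in bb])  # [64, 512, 512] -> 2 * halo along every axis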
1,229 | 4e7cfbf51ec9bad691d8dd9f103f22728cf5e952 | <mask token>
def compute_integrated_acquisition(acquisition, x):
"""
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x / acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition, x):
"""
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x_sample, d_acqu_x_sample = (acquisition.
acquisition_function_withGradients(x))
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x / acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f, X):
"""
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
"""
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:i + 1])
xbest[i] = ff[np.argmin(ff)]
return xbest
<mask token>
def best_value(Y, sign=1):
"""
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
"""
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i] = Y[:i + 1].min()
else:
Y_best[i] = Y[:i + 1].max()
return Y_best
<mask token>
def evaluate_function(f, X):
"""
Returns the evaluation of a function *f* and the time per evaluation
"""
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i, :] = f(X[i, :])
Y_time[i, :] = time.time() - time_zero
return Y_eval, Y_time
<mask token>
def merge_values(values1, values2):
"""
Merges two numpy arrays by calculating all possible combinations of rows
"""
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1, row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(
normalization_type))
return Y_norm
| <mask token>
def compute_integrated_acquisition(acquisition, x):
"""
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x / acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition, x):
"""
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x_sample, d_acqu_x_sample = (acquisition.
acquisition_function_withGradients(x))
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x / acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f, X):
"""
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
"""
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:i + 1])
xbest[i] = ff[np.argmin(ff)]
return xbest
def samples_multidimensional_uniform(bounds, num_data):
"""
Generates a multidimensional grid uniformly distributed.
:param bounds: tuple defining the box constraints.
:num_data: number of data points to generate.
"""
dim = len(bounds)
Z_rand = np.zeros(shape=(num_data, dim))
for k in range(0, dim):
Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1
], size=num_data)
return Z_rand
def reshape(x, input_dim):
"""
Reshapes x into a matrix with input_dim columns
"""
x = np.array(x)
if x.size == input_dim:
x = x.reshape((1, input_dim))
return x
<mask token>
def best_value(Y, sign=1):
"""
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
"""
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i] = Y[:i + 1].min()
else:
Y_best[i] = Y[:i + 1].max()
return Y_best
<mask token>
def evaluate_function(f, X):
"""
Returns the evaluation of a function *f* and the time per evaluation
"""
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i, :] = f(X[i, :])
Y_time[i, :] = time.time() - time_zero
return Y_eval, Y_time
<mask token>
def merge_values(values1, values2):
"""
Merges two numpy arrays by calculating all possible combinations of rows
"""
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1, row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(
normalization_type))
return Y_norm
| <mask token>
def compute_integrated_acquisition(acquisition, x):
"""
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x / acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition, x):
"""
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x_sample, d_acqu_x_sample = (acquisition.
acquisition_function_withGradients(x))
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x / acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f, X):
"""
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
"""
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:i + 1])
xbest[i] = ff[np.argmin(ff)]
return xbest
def samples_multidimensional_uniform(bounds, num_data):
"""
Generates a multidimensional grid uniformly distributed.
:param bounds: tuple defining the box constraints.
:num_data: number of data points to generate.
"""
dim = len(bounds)
Z_rand = np.zeros(shape=(num_data, dim))
for k in range(0, dim):
Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1
], size=num_data)
return Z_rand
def reshape(x, input_dim):
"""
Reshapes x into a matrix with input_dim columns
"""
x = np.array(x)
if x.size == input_dim:
x = x.reshape((1, input_dim))
return x
<mask token>
def get_d_moments(model, x):
"""
Gradients with respect to x of the moments (mean and sdev.) of the GP
:param model: GPy model.
:param x: location where the gradients are evaluated.
"""
input_dim = model.input_dim
x = reshape(x, input_dim)
_, v = model.predict(x)
dmdx, dvdx = model.predictive_gradients(x)
dmdx = dmdx[:, :, 0]
dsdx = dvdx / (2 * np.sqrt(v))
return dmdx, dsdx
def get_quantiles(acquisition_par, fmin, m, s):
"""
Quantiles of the Gaussian distribution useful to determine the acquisition function values
:param acquisition_par: parameter of the acquisition function
:param fmin: current minimum.
:param m: vector of means.
:param s: vector of standard deviations.
"""
if isinstance(s, np.ndarray):
s[s < 1e-10] = 1e-10
elif s < 1e-10:
s = 1e-10
u = (fmin - m - acquisition_par) / s
phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)
Phi = 0.5 * erfc(-u / np.sqrt(2))
return phi, Phi, u
def best_value(Y, sign=1):
"""
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
"""
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i] = Y[:i + 1].min()
else:
Y_best[i] = Y[:i + 1].max()
return Y_best
<mask token>
def evaluate_function(f, X):
"""
Returns the evaluation of a function *f* and the time per evaluation
"""
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i, :] = f(X[i, :])
Y_time[i, :] = time.time() - time_zero
return Y_eval, Y_time
def values_to_array(input_values):
"""
Transforms a values of int, float and tuples to a column vector numpy array
"""
if type(input_values) == tuple:
values = np.array(input_values).reshape(-1, 1)
elif type(input_values) == np.ndarray:
values = np.atleast_2d(input_values)
elif type(input_values) == int or type(input_values) == float or type(np
.int64):
values = np.atleast_2d(np.array(input_values))
else:
print('Type to transform not recognized')
return values
def merge_values(values1, values2):
"""
Merges two numpy arrays by calculating all possible combinations of rows
"""
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1, row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(
normalization_type))
return Y_norm
| import numpy as np
from scipy.special import erfc
import time
from ..core.errors import InvalidConfigError
def compute_integrated_acquisition(acquisition, x):
"""
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x / acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition, x):
"""
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
"""
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]
acqu_x_sample, d_acqu_x_sample = (acquisition.
acquisition_function_withGradients(x))
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x / acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f, X):
"""
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
"""
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:i + 1])
xbest[i] = ff[np.argmin(ff)]
return xbest
def samples_multidimensional_uniform(bounds, num_data):
"""
Generates a multidimensional grid uniformly distributed.
:param bounds: tuple defining the box constraints.
:num_data: number of data points to generate.
"""
dim = len(bounds)
Z_rand = np.zeros(shape=(num_data, dim))
for k in range(0, dim):
Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1
], size=num_data)
return Z_rand
def reshape(x, input_dim):
"""
Reshapes x into a matrix with input_dim columns
"""
x = np.array(x)
if x.size == input_dim:
x = x.reshape((1, input_dim))
return x
def get_moments(model, x):
"""
Moments (mean and sdev.) of a GP model at x
"""
input_dim = model.X.shape[1]
x = reshape(x, input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return m, s, fmin
def get_d_moments(model, x):
"""
Gradients with respect to x of the moments (mean and sdev.) of the GP
:param model: GPy model.
:param x: location where the gradients are evaluated.
"""
input_dim = model.input_dim
x = reshape(x, input_dim)
_, v = model.predict(x)
dmdx, dvdx = model.predictive_gradients(x)
dmdx = dmdx[:, :, 0]
dsdx = dvdx / (2 * np.sqrt(v))
return dmdx, dsdx
def get_quantiles(acquisition_par, fmin, m, s):
"""
Quantiles of the Gaussian distribution useful to determine the acquisition function values
:param acquisition_par: parameter of the acquisition function
:param fmin: current minimum.
:param m: vector of means.
:param s: vector of standard deviations.
"""
if isinstance(s, np.ndarray):
s[s < 1e-10] = 1e-10
elif s < 1e-10:
s = 1e-10
u = (fmin - m - acquisition_par) / s
phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)
Phi = 0.5 * erfc(-u / np.sqrt(2))
return phi, Phi, u
def best_value(Y, sign=1):
"""
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
"""
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i] = Y[:i + 1].min()
else:
Y_best[i] = Y[:i + 1].max()
return Y_best
def spawn(f):
"""
Function for parallel evaluation of the acquisition function
"""
def fun(pipe, x):
pipe.send(f(x))
pipe.close()
return fun
def evaluate_function(f, X):
"""
Returns the evaluation of a function *f* and the time per evaluation
"""
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i, :] = f(X[i, :])
Y_time[i, :] = time.time() - time_zero
return Y_eval, Y_time
def values_to_array(input_values):
"""
Transforms a values of int, float and tuples to a column vector numpy array
"""
if type(input_values) == tuple:
values = np.array(input_values).reshape(-1, 1)
elif type(input_values) == np.ndarray:
values = np.atleast_2d(input_values)
elif type(input_values) == int or type(input_values) == float or type(np
.int64):
values = np.atleast_2d(np.array(input_values))
else:
print('Type to transform not recognized')
return values
def merge_values(values1, values2):
"""
Merges two numpy arrays by calculating all possible combinations of rows
"""
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1, row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(
normalization_type))
return Y_norm
| # Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import erfc
import time
from ..core.errors import InvalidConfigError
def compute_integrated_acquisition(acquisition,x):
'''
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
'''
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x/acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition,x):
'''
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
'''
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x/acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f,X):
'''
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
'''
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:(i+1)])
xbest[i] = ff[np.argmin(ff)]
return xbest
def samples_multidimensional_uniform(bounds,num_data):
'''
Generates a multidimensional grid uniformly distributed.
:param bounds: tuple defining the box constraints.
:num_data: number of data points to generate.
'''
dim = len(bounds)
Z_rand = np.zeros(shape=(num_data,dim))
for k in range(0,dim): Z_rand[:,k] = np.random.uniform(low=bounds[k][0],high=bounds[k][1],size=num_data)
return Z_rand
def reshape(x,input_dim):
'''
Reshapes x into a matrix with input_dim columns
'''
x = np.array(x)
if x.size ==input_dim:
x = x.reshape((1,input_dim))
return x
def get_moments(model,x):
'''
Moments (mean and sdev.) of a GP model at x
'''
input_dim = model.X.shape[1]
x = reshape(x,input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return (m,s, fmin)
def get_d_moments(model,x):
'''
Gradients with respect to x of the moments (mean and sdev.) of the GP
:param model: GPy model.
:param x: location where the gradients are evaluated.
'''
input_dim = model.input_dim
x = reshape(x,input_dim)
_, v = model.predict(x)
dmdx, dvdx = model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return (dmdx, dsdx)
def get_quantiles(acquisition_par, fmin, m, s):
'''
Quantiles of the Gaussian distribution useful to determine the acquisition function values
:param acquisition_par: parameter of the acquisition function
:param fmin: current minimum.
:param m: vector of means.
:param s: vector of standard deviations.
'''
if isinstance(s, np.ndarray):
s[s<1e-10] = 1e-10
elif s< 1e-10:
s = 1e-10
u = (fmin - m - acquisition_par)/s
phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)
Phi = 0.5 * erfc(-u / np.sqrt(2))
return (phi, Phi, u)
def best_value(Y,sign=1):
'''
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
'''
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i]=Y[:(i+1)].min()
else:
Y_best[i]=Y[:(i+1)].max()
return Y_best
def spawn(f):
'''
Function for parallel evaluation of the acquisition function
'''
def fun(pipe,x):
pipe.send(f(x))
pipe.close()
return fun
def evaluate_function(f,X):
'''
Returns the evaluation of a function *f* and the time per evaluation
'''
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i,:] = f(X[i,:])
Y_time[i,:] = time.time() - time_zero
return Y_eval, Y_time
def values_to_array(input_values):
'''
Transforms a values of int, float and tuples to a column vector numpy array
'''
if type(input_values)==tuple:
values = np.array(input_values).reshape(-1,1)
elif type(input_values) == np.ndarray:
values = np.atleast_2d(input_values)
elif type(input_values)==int or type(input_values)==float or type(np.int64):
values = np.atleast_2d(np.array(input_values))
else:
print('Type to transform not recognized')
return values
def merge_values(values1,values2):
'''
Merges two numpy arrays by calculating all possible combinations of rows
'''
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1,row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
# Only normalize with non null sdev (divide by zero). For only one
# data point both std and ptp return 0.
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
# A range of [-1, 1] is more natural for a zero-mean GP
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(normalization_type))
return Y_norm
| [
7,
9,
12,
15,
16
] |
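The phi, Phi and u returned by get_quantiles above are the building blocks of improvement-based acquisition functions. A worked example with illustrative numbers, repeating the helper's arithmetic (scipy is only needed for erfc):

import numpy as np
from scipy.special import erfc

fmin, m, s, xi = 0.0, -0.5, 1.0, 0.01    # illustrative incumbent, mean, sdev and jitter
u = (fmin - m - xi) / s                  # 0.49
phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)
Phi = 0.5 * erfc(-u / np.sqrt(2))        # standard normal pdf and cdf at u
ei = s * (u * Phi + phi)                 # how an expected-improvement style acquisition combines them
print(round(u, 2), round(float(ei), 3))  # 0.49 0.691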
1,230 | cb32aa6a1c42e7bb417999f3f6f74ec22209c5a0 | <mask token>
class CrawlSerializer(serializers.Serializer):
<mask token>
<mask token>
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
<mask token>
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (
CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (
CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_stop_crawl'])
return crawl
| <mask token>
class CrawlSerializer(serializers.Serializer):
<mask token>
<mask token>
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
def is_valid(self, raise_exception=False):
with cache.lock(CRAWL_CACHE_LOCK_KEY):
return super().is_valid(raise_exception)
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (
CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (
CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_stop_crawl'])
return crawl
| <mask token>
class CrawlSerializer(serializers.Serializer):
crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START,
CRAWL_COMMAND_STOP])
default_error_messages = {**serializers.Serializer.
default_error_messages, 'cant_start_crawl':
'Can not start new crawl when already crawling', 'cant_stop_crawl':
'Can not stop crawl if not crawling'}
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
def is_valid(self, raise_exception=False):
with cache.lock(CRAWL_CACHE_LOCK_KEY):
return super().is_valid(raise_exception)
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (
CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (
CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_stop_crawl'])
return crawl
| from django.core.cache import cache
from rest_framework import serializers
from thenewboston.constants.crawl import CRAWL_COMMAND_START, CRAWL_COMMAND_STOP, CRAWL_STATUS_CRAWLING, CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED
from v1.cache_tools.cache_keys import CRAWL_CACHE_LOCK_KEY, CRAWL_STATUS
from v1.tasks.crawl import start_crawl
class CrawlSerializer(serializers.Serializer):
crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START,
CRAWL_COMMAND_STOP])
default_error_messages = {**serializers.Serializer.
default_error_messages, 'cant_start_crawl':
'Can not start new crawl when already crawling', 'cant_stop_crawl':
'Can not stop crawl if not crawling'}
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
def is_valid(self, raise_exception=False):
with cache.lock(CRAWL_CACHE_LOCK_KEY):
return super().is_valid(raise_exception)
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (
CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (
CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages[
'cant_stop_crawl'])
return crawl
| from django.core.cache import cache
from rest_framework import serializers
from thenewboston.constants.crawl import (
CRAWL_COMMAND_START,
CRAWL_COMMAND_STOP,
CRAWL_STATUS_CRAWLING,
CRAWL_STATUS_NOT_CRAWLING,
CRAWL_STATUS_STOP_REQUESTED
)
from v1.cache_tools.cache_keys import CRAWL_CACHE_LOCK_KEY, CRAWL_STATUS
from v1.tasks.crawl import start_crawl
class CrawlSerializer(serializers.Serializer):
crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START, CRAWL_COMMAND_STOP])
default_error_messages = {
**serializers.Serializer.default_error_messages,
'cant_start_crawl': 'Can not start new crawl when already crawling',
'cant_stop_crawl': 'Can not stop crawl if not crawling',
}
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
def is_valid(self, raise_exception=False):
with cache.lock(CRAWL_CACHE_LOCK_KEY):
return super().is_valid(raise_exception)
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages['cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages['cant_stop_crawl'])
return crawl
| [
4,
5,
6,
7,
8
] |
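A sketch of how the CrawlSerializer above is typically driven from a Django REST Framework view; the surrounding view and request payload are illustrative rather than part of the original module:

# e.g. inside an APIView post() handler
serializer = CrawlSerializer(data={'crawl': CRAWL_COMMAND_START})
serializer.is_valid(raise_exception=True)  # runs validate_crawl under the cache lock
serializer.save()                          # create() updates CRAWL_STATUS and queues start_crawl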
1,231 | 7ef62e5545930ab13312f8ae1ea70a74386d8bfa | <mask token>
| def ip_address(address):
new_address = ''
split_address = address.split('.')
seprator = '[.]'
new_address = seprator.join(split_address)
return new_address
<mask token>
| def ip_address(address):
new_address = ''
split_address = address.split('.')
seprator = '[.]'
new_address = seprator.join(split_address)
return new_address
if __name__ == '__main__':
ipaddress = ip_address('192.168.1.1')
print(ipaddress)
| def ip_address(address):
new_address = ""
split_address = address.split(".")
seprator = "[.]"
new_address = seprator.join(split_address)
return new_address
if __name__ == "__main__":
ipaddress = ip_address("192.168.1.1")
print(ipaddress)
| null | [
0,
1,
2,
3
] |
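The record that follows compares ways of computing reweighted wake-sleep gradients: detaching log_q inside the weight keeps the wake-theta backward pass from writing gradients into the inference network, so both losses can share one backward pass without zeroing in between. The comparison itself reduces to an element-wise equality check over gradient lists, as in this small sketch with illustrative tensors:

import torch

def are_tensors_equal(xs, ys):
    return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])

grads_a = [torch.tensor([1.0, 2.0]), torch.tensor([3.0])]
grads_b = [torch.tensor([1.0, 2.0]), torch.tensor([3.0])]
print(are_tensors_equal(grads_a, grads_b))                   # True
print(are_tensors_equal(grads_a, [g + 1 for g in grads_b]))  # False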
1,232 | 8f558593e516aa4a769b7c5e1c95c8bc23a36420 | <mask token>
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
<mask token>
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
<mask token>
| <mask token>
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
<mask token>
| <mask token>
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = np.log(true_p_mixture_probs
) / args.softmax_multiplier
args.seed = 1
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = util.init_models(
args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
print('Computing grads all at once is ok: {}'.format(are_tensors_equal(
grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.
format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
| import torch
import util
import numpy as np
import argparse
import losses
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = np.log(true_p_mixture_probs
) / args.softmax_multiplier
args.seed = 1
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = util.init_models(
args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(latent_dist,
num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(latent_dist, latent
).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(generative_model,
inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
print('Computing grads all at once is ok: {}'.format(are_tensors_equal(
grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.
format(are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
| import torch
import util
import numpy as np
import argparse
import losses
args = argparse.Namespace()
args.device = torch.device('cpu')
args.num_mixtures = 20
args.init_mixture_logits = np.ones(args.num_mixtures)
args.softmax_multiplier = 0.5
args.relaxed_one_hot = False
args.temperature = None
temp = np.arange(args.num_mixtures) + 5
true_p_mixture_probs = temp / np.sum(temp)
args.true_mixture_logits = \
np.log(true_p_mixture_probs) / args.softmax_multiplier
args.seed = 1
# init models
util.set_seed(args.seed)
generative_model, inference_network, true_generative_model = \
util.init_models(args)
optimizer_phi = torch.optim.Adam(inference_network.parameters())
optimizer_theta = torch.optim.Adam(generative_model.parameters())
batch_size = 3
num_particles = 4
obs = true_generative_model.sample_obs(batch_size)
def get_grads_correct(seed):
util.set_seed(seed)
theta_grads_correct = []
phi_grads_correct = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
theta_grads_correct = [parameter.grad.clone() for parameter in
generative_model.parameters()]
# in rws, we step as we compute the grads
# optimizer_theta.step()
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
phi_grads_correct = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in rws, we step as we compute the grads
# optimizer_phi.step()
return theta_grads_correct, phi_grads_correct
def get_grads_in_one(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
optimizer_phi.zero_grad()
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def get_grads_in_one_no_zeroing(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = losses.get_log_weight_and_log_q(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
# optimizer_phi.zero_grad() -> don't zero phi grads
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def get_log_weight_and_log_q_weird_detach(generative_model, inference_network, obs,
num_particles=1, reparam=False):
"""Compute log weight and log prob of inference network.
Args:
generative_model: models.GenerativeModel object
inference_network: models.InferenceNetwork object
obs: tensor of shape [batch_size]
num_particles: int
reparam: reparameterize sampling from q (only applicable if z is
Concrete)
Returns:
log_weight: tensor of shape [batch_size, num_particles]
log_q: tensor of shape [batch_size, num_particles]
"""
latent_dist = inference_network.get_latent_dist(obs)
latent = inference_network.sample_from_latent_dist(
latent_dist, num_particles, reparam=reparam)
log_p = generative_model.get_log_prob(latent, obs).transpose(0, 1)
log_q = inference_network.get_log_prob_from_latent_dist(
latent_dist, latent).transpose(0, 1)
log_weight = log_p - log_q.detach()
return log_weight, log_q
def get_grads_weird_detach(seed):
util.set_seed(seed)
theta_grads_in_one = []
phi_grads_in_one = []
log_weight, log_q = get_log_weight_and_log_q_weird_detach(
generative_model, inference_network, obs, num_particles)
optimizer_phi.zero_grad()
optimizer_theta.zero_grad()
wake_theta_loss, elbo = losses.get_wake_theta_loss_from_log_weight(
log_weight)
wake_theta_loss.backward(retain_graph=True)
# optimizer_phi.zero_grad() -> don't zero phi grads
# optimizer_theta.zero_grad()
wake_phi_loss = losses.get_wake_phi_loss_from_log_weight_and_log_q(
log_weight, log_q)
wake_phi_loss.backward()
# only get the grads in the end!
theta_grads_in_one = [parameter.grad.clone() for parameter in
generative_model.parameters()]
phi_grads_in_one = [parameter.grad.clone() for parameter in
inference_network.parameters()]
# in pyro, we want step to be in a different stage
# optimizer_theta.step()
# optimizer_phi.step()
return theta_grads_in_one, phi_grads_in_one
def are_tensors_equal(xs, ys):
return all([torch.all(torch.eq(x, y)) for x, y in zip(xs, ys)])
seed = 1
grads_correct = sum(get_grads_correct(seed), [])
grads_in_one = sum(get_grads_in_one(seed), [])
grads_in_one_no_zeroing = sum(get_grads_in_one_no_zeroing(seed), [])
grads_weird_detach = sum(get_grads_weird_detach(seed), [])
# is computing grads all in once ok?
print('Computing grads all at once is ok: {}'.format(
are_tensors_equal(grads_correct, grads_in_one)))
print('Computing grads all at once and not zeroing phi grads is ok: {}'.format(
are_tensors_equal(grads_correct, grads_in_one_no_zeroing)))
print('Computing grads with weird detach is ok: {}'.format(
are_tensors_equal(grads_correct, grads_weird_detach)))
| [
5,
6,
8,
9,
10
] |
1,233 | c5bbfa1a86dbbd431566205ff7d7b941bdceff58 | <mask token>
| <mask token>
reload(plib)
reload(rdl)
<mask token>
plt.rcParams.update(params)
<mask token>
if not os.path.exists(outpath):
os.mkdir(outpath)
plt.close('all')
<mask token>
if config['plot_param_space']:
for desc in search_res:
fig = plt.figure()
plib.plot_search_matrix(fig, search_res[desc], config['fselection'],
config['method'], config.get('glomeruli', []))
fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +
'.' + config['format']))
<mask token>
plib.new_descriptor_performance_plot(fig, max_overview, config['fselection'
], config['method'], config.get('glomeruli', []), ptype)
fig.subplots_adjust(bottom=0.25)
fig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[
'format']), dpi=600)
<mask token>
for i, desc in enumerate(desc2comp):
desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)
desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)
desc1_collect.extend(max_overview['svr']['linear']['p_selection'][
desc_idx1, :])
desc2_collect.extend(max_overview['forest']['forest']['p_selection'][
desc_idx2, :])
ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],
max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',
mfc=markers[i], label=desc, markersize=5)
ax.plot([0, 0.8], [0, 0.8], color='0.5')
plt.axis('scaled')
ax.set_xlim([0, 0.9])
ax.set_ylim([0, 0.9])
ax.set_xlabel('SVR (q2)')
ax.set_ylabel('RFR (q2)')
utils.simple_axis(ax)
ax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':
'small'}, bbox_to_anchor=(0.01, 1))
<mask token>
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
fig.subplots_adjust(bottom=0.2)
fig.tight_layout()
fig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[
'format']), dpi=600)
assert len(desc1_collect) == len(desc2_collect)
<mask token>
print('svr better than rfr in {:.2f} \\% of the cases'.format(ratio))
if utils.run_from_ipython():
plt.show()
| <mask token>
reload(plib)
reload(rdl)
params = {'axes.labelsize': 6, 'font.size': 6, 'legend.fontsize': 7,
'xtick.labelsize': 6, 'ytick.labelsize': 6}
plt.rcParams.update(params)
config = json.load(open(sys.argv[1]))
outpath = os.path.join(config['inpath'], 'plots')
if not os.path.exists(outpath):
os.mkdir(outpath)
plt.close('all')
search_res, max_overview, sc, _ = rdl.read_paramsearch_results(config[
'inpath'], p_selection=config.get('selection', {}))
if config['plot_param_space']:
for desc in search_res:
fig = plt.figure()
plib.plot_search_matrix(fig, search_res[desc], config['fselection'],
config['method'], config.get('glomeruli', []))
fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +
'.' + config['format']))
fig = plt.figure(figsize=(3.35, 2))
ptype = config['descriptor_plot_type']
plib.new_descriptor_performance_plot(fig, max_overview, config['fselection'
], config['method'], config.get('glomeruli', []), ptype)
fig.subplots_adjust(bottom=0.25)
fig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[
'format']), dpi=600)
markers = ['1', '0']
desc2comp = ['EVA_100', 'all']
fig = plt.figure(figsize=(3.35, 1.8))
ax = fig.add_subplot(111)
desc1_collect, desc2_collect = [], []
for i, desc in enumerate(desc2comp):
desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)
desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)
desc1_collect.extend(max_overview['svr']['linear']['p_selection'][
desc_idx1, :])
desc2_collect.extend(max_overview['forest']['forest']['p_selection'][
desc_idx2, :])
ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],
max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',
mfc=markers[i], label=desc, markersize=5)
ax.plot([0, 0.8], [0, 0.8], color='0.5')
plt.axis('scaled')
ax.set_xlim([0, 0.9])
ax.set_ylim([0, 0.9])
ax.set_xlabel('SVR (q2)')
ax.set_ylabel('RFR (q2)')
utils.simple_axis(ax)
ax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':
'small'}, bbox_to_anchor=(0.01, 1))
ticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
ticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
fig.subplots_adjust(bottom=0.2)
fig.tight_layout()
fig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[
'format']), dpi=600)
assert len(desc1_collect) == len(desc2_collect)
svr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if
d1 > d2])
rfr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if
d1 < d2])
ratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))
print('svr better than rfr in {:.2f} \\% of the cases'.format(ratio))
if utils.run_from_ipython():
plt.show()
| <mask token>
import sys
import os
import json
import numpy as np
import pylab as plt
import itertools as it
from master.libs import plot_lib as plib
from master.libs import read_data_lib as rdl
from master.libs import utils
import matplotlib.gridspec as gridspec
reload(plib)
reload(rdl)
params = {'axes.labelsize': 6, 'font.size': 6, 'legend.fontsize': 7,
'xtick.labelsize': 6, 'ytick.labelsize': 6}
plt.rcParams.update(params)
config = json.load(open(sys.argv[1]))
outpath = os.path.join(config['inpath'], 'plots')
if not os.path.exists(outpath):
os.mkdir(outpath)
plt.close('all')
search_res, max_overview, sc, _ = rdl.read_paramsearch_results(config[
'inpath'], p_selection=config.get('selection', {}))
if config['plot_param_space']:
for desc in search_res:
fig = plt.figure()
plib.plot_search_matrix(fig, search_res[desc], config['fselection'],
config['method'], config.get('glomeruli', []))
fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +
'.' + config['format']))
fig = plt.figure(figsize=(3.35, 2))
ptype = config['descriptor_plot_type']
plib.new_descriptor_performance_plot(fig, max_overview, config['fselection'
], config['method'], config.get('glomeruli', []), ptype)
fig.subplots_adjust(bottom=0.25)
fig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[
'format']), dpi=600)
markers = ['1', '0']
desc2comp = ['EVA_100', 'all']
fig = plt.figure(figsize=(3.35, 1.8))
ax = fig.add_subplot(111)
desc1_collect, desc2_collect = [], []
for i, desc in enumerate(desc2comp):
desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)
desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)
desc1_collect.extend(max_overview['svr']['linear']['p_selection'][
desc_idx1, :])
desc2_collect.extend(max_overview['forest']['forest']['p_selection'][
desc_idx2, :])
ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],
max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',
mfc=markers[i], label=desc, markersize=5)
ax.plot([0, 0.8], [0, 0.8], color='0.5')
plt.axis('scaled')
ax.set_xlim([0, 0.9])
ax.set_ylim([0, 0.9])
ax.set_xlabel('SVR (q2)')
ax.set_ylabel('RFR (q2)')
utils.simple_axis(ax)
ax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':
'small'}, bbox_to_anchor=(0.01, 1))
ticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
ticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
fig.subplots_adjust(bottom=0.2)
fig.tight_layout()
fig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[
'format']), dpi=600)
assert len(desc1_collect) == len(desc2_collect)
svr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if
d1 > d2])
rfr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if
d1 < d2])
ratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))
print('svr better than rfr in {:.2f} \\% of the cases'.format(ratio))
if utils.run_from_ipython():
plt.show()
| #!/usr/bin/env python
# encoding: utf-8
"""
plot: regularization on x axis, number of k_best features on y
Created by on 2012-01-27.
Copyright (c) 2012. All rights reserved.
"""
import sys
import os
import json
import numpy as np
import pylab as plt
import itertools as it
from master.libs import plot_lib as plib
from master.libs import read_data_lib as rdl
from master.libs import utils
import matplotlib.gridspec as gridspec
reload(plib)
reload(rdl)
params = {'axes.labelsize': 6,
'font.size': 6,
'legend.fontsize': 7,
'xtick.labelsize':6,
'ytick.labelsize': 6}
plt.rcParams.update(params)
config = json.load(open(sys.argv[1]))
outpath = os.path.join(config['inpath'], 'plots')
if not os.path.exists(outpath):
os.mkdir(outpath)
# variables for results
plt.close('all')
search_res, max_overview, sc, _ = rdl.read_paramsearch_results(config['inpath'],
p_selection=config.get('selection', {}))
if config['plot_param_space']:
for desc in search_res:
fig = plt.figure()
plib.plot_search_matrix(fig, search_res[desc], config['fselection'],
config['method'], config.get('glomeruli', []))
fig.savefig(os.path.join(outpath, config['method'] + '_' + desc + '.' + config['format']))
# descriptor method performance plots
fig = plt.figure(figsize=(3.35, 2))
ptype = config['descriptor_plot_type']
plib.new_descriptor_performance_plot(fig, max_overview, config['fselection'],
config['method'],
config.get('glomeruli', []),
ptype)
fig.subplots_adjust(bottom=0.25)
fig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config['format']), dpi=600)
# ML method comparison plot
markers = ['1', '0']
desc2comp = ['EVA_100', 'all']
fig = plt.figure(figsize=(3.35, 1.8))
ax = fig.add_subplot(111)
desc1_collect, desc2_collect = [], []
for i, desc in enumerate(desc2comp):
desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)
desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)
desc1_collect.extend(max_overview['svr']['linear']['p_selection'][desc_idx1, :])
desc2_collect.extend(max_overview['forest']['forest']['p_selection'][desc_idx2, :])
ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],
max_overview['forest']['forest']['p_selection'][desc_idx2, :],
'o', mfc=markers[i],
label=desc,
markersize=5)
ax.plot([0, 0.8], [0, 0.8], color='0.5')
plt.axis('scaled')
ax.set_xlim([0, .9])
ax.set_ylim([0, .9])
ax.set_xlabel('SVR (q2)')
ax.set_ylabel('RFR (q2)')
utils.simple_axis(ax)
ax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size': 'small'}, bbox_to_anchor=(0.01, 1))
ticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
ticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
fig.subplots_adjust(bottom=0.2)
fig.tight_layout()
fig.savefig(os.path.join(outpath, 'best_method_comparison.' + config['format']), dpi=600)
assert len(desc1_collect) == len(desc2_collect)
svr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 > d2])
rfr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 < d2])
ratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))
print('svr better than rfr in {:.2f} \% of the cases'.format(ratio))
if utils.run_from_ipython():
plt.show()
| [
0,
1,
2,
3,
4
] |
1,234 | 2c4fa92b28fa46a26f21ada8826474baac204e00 | <mask token>
| def mysum(*c):
print(sum([x for x in c]))
<mask token>
| def mysum(*c):
print(sum([x for x in c]))
mysum(1, 2, 3, 4, 11)
| def mysum(*c):
print(sum([x for x in c]))
mysum(1,2,3,4,0xB) | null | [
0,
1,
2,
3
] |
1,235 | af2aa236f6bfc582093faf868a374be1ebdfabf2 | <mask token>
| <mask token>
out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
<mask token>
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'E' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data
['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('EQUAL-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_eq) + '\n')
summary.write('FP equal domain: ' + str(fp_eq) + '\n')
summary.write('FN equal domain: ' + str(fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +
'\n')
summary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),
5)) + '\n\n')
<mask token>
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'V' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i - 1] + int(truth_data['length_' +
str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('VARIABLE-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_var) + '\n')
summary.write('FP equal domain: ' + str(fp_var) + '\n')
summary.write('FN equal domain: ' + str(fn_var) + '\n')
summary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +
'\n')
summary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)
) + '\n')
summary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +
fn_var), 5)) + '\n\n')
summary.write('OVERALL STATISTICS\n')
summary.write('TP: ' + str(tp_var + tp_eq) + '\n')
summary.write('FP: ' + str(fp_var + fp_eq) + '\n')
summary.write('FN: ' + str(fn_var + fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +
fn_var + tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + tp_eq + fp_eq), 5)) + '\n')
summary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\n')
| <mask token>
cutoff = float(input('Tolerance (decimal)? '))
docpath = 'C:/Users/RackS/Documents/'
out = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',
encoding='UTF-8')
summary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +
'.txt', 'w', encoding='UTF-8')
out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'E' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data
['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('EQUAL-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_eq) + '\n')
summary.write('FP equal domain: ' + str(fp_eq) + '\n')
summary.write('FN equal domain: ' + str(fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +
'\n')
summary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),
5)) + '\n\n')
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'V' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i - 1] + int(truth_data['length_' +
str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('VARIABLE-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_var) + '\n')
summary.write('FP equal domain: ' + str(fp_var) + '\n')
summary.write('FN equal domain: ' + str(fn_var) + '\n')
summary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +
'\n')
summary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)
) + '\n')
summary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +
fn_var), 5)) + '\n\n')
summary.write('OVERALL STATISTICS\n')
summary.write('TP: ' + str(tp_var + tp_eq) + '\n')
summary.write('FP: ' + str(fp_var + fp_eq) + '\n')
summary.write('FN: ' + str(fn_var + fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +
fn_var + tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + tp_eq + fp_eq), 5)) + '\n')
summary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\n')
| <mask token>
import os
import json
import csv
cutoff = float(input('Tolerance (decimal)? '))
docpath = 'C:/Users/RackS/Documents/'
out = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',
encoding='UTF-8')
summary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +
'.txt', 'w', encoding='UTF-8')
out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'E' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data
['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('EQUAL-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_eq) + '\n')
summary.write('FP equal domain: ' + str(fp_eq) + '\n')
summary.write('FN equal domain: ' + str(fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +
'\n')
summary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),
5)) + '\n\n')
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'V' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i - 1] + int(truth_data['length_' +
str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('VARIABLE-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_var) + '\n')
summary.write('FP equal domain: ' + str(fp_var) + '\n')
summary.write('FN equal domain: ' + str(fn_var) + '\n')
summary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +
'\n')
summary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)
) + '\n')
summary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +
fn_var), 5)) + '\n\n')
summary.write('OVERALL STATISTICS\n')
summary.write('TP: ' + str(tp_var + tp_eq) + '\n')
summary.write('FP: ' + str(fp_var + fp_eq) + '\n')
summary.write('FN: ' + str(fn_var + fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +
fn_var + tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + tp_eq + fp_eq), 5)) + '\n')
summary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\n')
| """
"""
import os
import json
import csv
cutoff = float(input("Tolerance (decimal)? "))
docpath = "C:/Users/RackS/Documents/"
out = open("isosegmenter_scoring_error"+str(cutoff*100)+".csv", 'w', encoding='UTF-8')
summary = open("isosegmenter_score_summary_error"+str(cutoff*100)+".txt", 'w', encoding='UTF-8')
out.write("SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n")
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "E" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",E,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("EQUAL-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_eq) + "\n")
summary.write("FP equal domain: " + str(fp_eq) + "\n")
summary.write("FN equal domain: " + str(fn_eq) + "\n")
summary.write("Sensitivity: " + str(round(tp_eq/(tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_eq/(tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + "\n\n")
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "V" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",V,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("VARIABLE-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_var) + "\n")
summary.write("FP equal domain: " + str(fp_var) + "\n")
summary.write("FN equal domain: " + str(fn_var) + "\n")
summary.write("Sensitivity: " + str(round(tp_var/(tp_var + fn_var),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_var/(tp_var + fp_var),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + "\n\n")
summary.write("OVERALL STATISTICS\n")
summary.write("TP: " + str(tp_var + tp_eq) + "\n")
summary.write("FP: " + str(fp_var + fp_eq) + "\n")
summary.write("FN: " + str(fn_var + fn_eq) + "\n")
summary.write("Sensitivity: " + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + "\n") | [
0,
1,
2,
3,
4
] |
1,236 | 2396f7acab95260253c367c62002392760157705 | <mask token>
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,
'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],
'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,
'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':
'node_classification', 'model': 'gcnii', 'dataset': dataset}
args = get_extra_args(args)
return build_args_from_dict(args)
<mask token>
@register_func('cora')
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func('citeseer')
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func('pubmed')
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 0.0005
return args
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
<mask token>
| <mask token>
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,
'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],
'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,
'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':
'node_classification', 'model': 'gcnii', 'dataset': dataset}
args = get_extra_args(args)
return build_args_from_dict(args)
def register_func(name):
def register_func_name(func):
DATASET_REGISTRY[name] = func
return func
return register_func_name
@register_func('cora')
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func('citeseer')
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func('pubmed')
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 0.0005
return args
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
<mask token>
| <mask token>
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,
'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],
'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,
'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':
'node_classification', 'model': 'gcnii', 'dataset': dataset}
args = get_extra_args(args)
return build_args_from_dict(args)
def register_func(name):
def register_func_name(func):
DATASET_REGISTRY[name] = func
return func
return register_func_name
@register_func('cora')
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func('citeseer')
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func('pubmed')
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 0.0005
return args
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
if __name__ == '__main__':
datasets = ['citeseer']
results = []
for x in datasets:
results += run(x)
print_result(results, datasets, 'gcnii')
| <mask token>
DATASET_REGISTRY = {}
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {'lr': 0.01, 'weight_decay': 0.0005, 'max_epoch': 1000,
'max_epochs': 1000, 'patience': 100, 'cpu': cpu, 'device_id': [0],
'seed': [42], 'dropout': 0.5, 'hidden_size': 256, 'num_layers': 32,
'lmbda': 0.5, 'wd1': 0.001, 'wd2': 0.0005, 'alpha': 0.1, 'task':
'node_classification', 'model': 'gcnii', 'dataset': dataset}
args = get_extra_args(args)
return build_args_from_dict(args)
def register_func(name):
def register_func_name(func):
DATASET_REGISTRY[name] = func
return func
return register_func_name
@register_func('cora')
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func('citeseer')
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func('pubmed')
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 0.0005
return args
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
if __name__ == '__main__':
datasets = ['citeseer']
results = []
for x in datasets:
results += run(x)
print_result(results, datasets, 'gcnii')
| import random
import numpy as np
import torch
from utils import print_result, set_random_seed, get_dataset, get_extra_args
from cogdl.tasks import build_task
from cogdl.datasets import build_dataset
from cogdl.utils import build_args_from_dict
DATASET_REGISTRY = {}
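# Maps a dataset name to a function that overrides the default hyperparameters;
# entries are added by the @register_func decorator below and looked up in run().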
def build_default_args_for_node_classification(dataset):
cpu = not torch.cuda.is_available()
args = {
"lr": 0.01,
"weight_decay": 5e-4,
"max_epoch": 1000,
"max_epochs": 1000,
"patience": 100,
"cpu": cpu,
"device_id": [0],
"seed": [42],
"dropout": 0.5,
"hidden_size": 256,
"num_layers": 32,
"lmbda": 0.5,
"wd1": 0.001,
"wd2": 5e-4,
"alpha": 0.1,
"task": "node_classification",
"model": "gcnii",
"dataset": dataset,
}
args = get_extra_args(args)
return build_args_from_dict(args)
def register_func(name):
def register_func_name(func):
DATASET_REGISTRY[name] = func
return func
return register_func_name
@register_func("cora")
def cora_config(args):
args.num_layers = 64
args.hidden_size = 64
args.dropout = 0.6
return args
@register_func("citeseer")
def citeseer_config(args):
args.num_layers = 32
args.hidden_size = 256
args.lr = 0.001
args.patience = 200
args.max_epoch = 2000
args.lmbda = 0.6
args.dropout = 0.7
return args
@register_func("pubmed")
def pubmed_config(args):
args.num_layers = 16
args.hidden_size = 256
args.lmbda = 0.4
args.dropout = 0.5
args.wd1 = 5e-4
return args
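# run(): build the defaults, apply the dataset-specific overrides from the
# registry, then train once per random seed and collect the results.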
def run(dataset_name):
args = build_default_args_for_node_classification(dataset_name)
args = DATASET_REGISTRY[dataset_name](args)
dataset, args = get_dataset(args)
results = []
for seed in args.seed:
set_random_seed(seed)
task = build_task(args, dataset=dataset)
result = task.train()
results.append(result)
return results
if __name__ == "__main__":
# datasets = ["cora", "citeseer", "pubmed"]
datasets = ["citeseer"]
results = []
for x in datasets:
results += run(x)
print_result(results, datasets, "gcnii")
| [
5,
6,
7,
8,
10
] |
1,237 | dace25428f48da633ee571b51565d15650782649 | #!/usr/bin/python
import os
import sys
import csv
import json
from time import sleep
from datetime import datetime
ShowProgress = False
ConvertTime = False
def print_welcome():
print("""
[*******************************************************************************************************]
-----FINDING DUPLICATE OH16/OH20/O2 EVENTS----
    This script will find duplicate OH16, OH20 and O2 events by iterating through two or three files from the
    F:/Data/Source_Altitude/DoubleHumps/ directory. It accepts the .txt files that are created by using the
    "findDoubleHumpOH.mat" files in Matlab. Files should be of the same year and different channels, i.e.
    "DoubleHumpsOH16_2010.txt", "DoubleHumpsOH20_2010.txt" and "DoublehumpsO2_2010.txt". The script will then
    compare the files and return the correlated events. You will be asked to input the path and file names to the
input files as well as a name to have the new file be saved as. It is recommended that the file is saved in
a .csv format to easily be used by Microsoft Excel.
[*******************************************************************************************************]""")
def print_primary_options():
print("""\n[*] Options:\n
[1] Auto Correlate
[2] Manually Correlate
[3] Help
[99] Quit
""")
def print_secondary_options():
print("""\n[*] Options:\n
[1] Find OH16 & OH20 correlations
[2] Find OH16, OH20 and O2 correlations
[3] Help
[99] Quit
""")
def print_sort_options():
print("""\n[*] Options:\n
[1] Sort all
[2] Sort by years
[3] Sort by months
[4] Sort by seasons
[5] Help
[99] Quit
""")
#
# Print iterations progress
#
def print_progress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
barLength - Optional : character length of bar (Int)
"""
formatStr = "{0:." + str(decimals) + "f}"
percents = formatStr.format(100 * (iteration / float(total)))
filledLength = int(round(barLength * iteration / float(total)))
bar = '*' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def initialize():
if file_exists('settings.json'):
print("Exists")
else:
settings = {
'setup': 'false',
'savePath': '',
'loadPath': '',
'dataPath': ''
}
s = json.dumps(settings)
with open('settings.json', 'w') as f:
f.write(s)
f.close()
print("[*] Initialized")
def is_windows():
if sys.platform == "win32":
return True
#
# Because Windows will be Windows...
#
def smart_input(message):
if is_windows():
return input(message)
else:
return raw_input(message)
def filepath_exists(path):
if os.path.exists(path):
return True
def file_exists(filename):
if os.path.isfile(filename):
return True
#
# Strip file contents
#
def strip_contents(filename):
contents = []
f = open(filename)
csv_f = csv.reader(f)
for rows in csv_f:
row4 = rows[4].strip()
row5 = rows[5].strip()
contents.append(rows[1].strip() + "_" + rows[2].strip() + "_" + rows[3].strip() + "_" + row4 + "_" + row5 + "_" + rows[6].strip())
f.close()
return contents
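#
# Compare every entry from one channel with every entry from the other and
# keep the exact matches (simple O(n*m) nested loop)
#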
def find_duplicates(obj01, obj02, start_message):
duplicates = []
count = 0
i = 0
t = len(obj01)
print("[*] " + start_message)
if ShowProgress:
print_progress(i, t, prefix='Progress', suffix="Complete", barLength=50)
for OH16_rows in obj01:
for OH20_rows in obj02:
if OH16_rows == OH20_rows:
duplicates.append(OH16_rows)
count += 1
if ShowProgress:
i += 1
print_progress(i, t, prefix='Progress', suffix="Complete", barLength=50)
print("[*] Number of correlations: " + str(count))
sleep(1)
print("[*] Processing data completed!")
sleep(1)
return duplicates
#
# Convert data to CSV formatted
#
def format_list(text_list):
print("[*] Converting list to CSV format...")
formatted = []
i = 0
t = len(text_list)
title = False
if ShowProgress:
print_progress(i, t, prefix='Progress', suffix="Complete", barLength=50)
for rows in text_list:
if not title:
formatted.append('Data, Time, Orbit, Event, LAT, LONG')
formatted.append(rows.replace("_", ", "))
title = True
if ShowProgress:
i += 1
print_progress(i, t, prefix='Progress', suffix="Complete", barLength=50)
print("[*] Formatted...")
return formatted
#
# Save file
#
def save_to_file(name, text):
print("[*] Saving " + name + " file...")
file_write = open(name, "w+")
for rows in text:
file_write.write(rows + "\n")
file_write.close()
print("[*] File saved successfully: " + name)
def get_and_check_file():
name = smart_input("Enter the file path for a file: ")
while not file_exists(name):
name = smart_input("[!] Could not find file. Enter the file path for a file: ")
return name
def save_file_as():
save_name = smart_input("[*] Enter the name the file will be saved as (\"It will be saved as .csv\"): ")
return save_name
def cwd():
os.getcwd()
def manage_data_directory(type):
if type == "1620":
if not filepath_exists("Correlations/OH16_20/CSV/"):
os.makedirs("Correlations/OH16_20/CSV/")
if not filepath_exists("Correlations/OH16_20/TXT/"):
os.makedirs("Correlations/OH16_20/TXT/")
elif type == "O2":
if not filepath_exists("Correlations/OH16_20_O2/CSV/"):
os.makedirs("Correlations/OH16_20_O2/CSV/")
if not filepath_exists("Correlations/OH16_20/TXT/"):
os.makedirs("Correlations/OH16_20_O2/TXT/")
else:
        print("[!] Something went wrong..")
# Script begins
#welcome_message()
#OH16_filename = smart_input("Enter the file path for an OH16 file: ")
#while not file_exists(OH16_filename):
# OH16_filename = smart_input("[!] - Could not find file. Enter the file path for an OH16 file: ")
#OH20_filename = smart_input("Enter the file path for an OH20 file: ")
#while not file_exists(OH20_filename):
# OH20_filename = smart_input("[!] - Could not find file. Enter the file path for an OH20 file: ")
#
#O2_filename = smart_input("Enter the file path for an O2 file: ")
#while not file_exists(O2_filename):
# O2_filename = smart_input("[!] - Could not find file. Enter the file path for an O2 file: ")
#save_name = smart_input("Enter the name the file will be saved as (\"It will be saved as .csv\"): ")
#progress = smart_input("Do you want to show the progress? y/n: ")
#if progress == 'y':
# ShowProgress = True
#OH16 = truncate_contents(OH16_filename)
#OH20 = truncate_contents(OH20_filename)
#O2 = truncate_contents(O2_filename)
#oh1620 = find_oh1620(OH16, OH20)
#oho2 = find_OHO2(oh1620, O2)
#formatted_list = format_list(oho2)
#os.getcwd()
#if not filepath_exists("Correlations/OH16_20_O2/CSV/"):
# os.makedirs("Correlations/OH16_20_O2/CSV/")
#if not filepath_exists("Correlations/OH16_20_O2/TXT/"):
# os.makedirs("Correlations/OH16_20_O2/TXT/")
#save_list_to_file("Correlations/OH16_20_O2/CSV/" + save_name + ".csv", formatted_list)
#save_list_to_file("Correlations/OH16_20_O2/TXT/" + save_name + ".txt", formatted_list)
| null | null | null | null | [
0
] |
1,238 | 347627df4b08eca6e2137161472b4d31534cf81b | <mask token>
| <mask token>
def apply_timezone_datetime(_local_tz: str, _time: datetime.time):
"""
set time zone + merge now().date() with time()
:param _local_tz:
:param _time:
:return:
"""
return pytz.timezone(_local_tz).localize(datetime.datetime.combine(
datetime.datetime.now().date(), _time))
| import pytz
import datetime
def apply_timezone_datetime(_local_tz: str, _time: datetime.time):
"""
set time zone + merge now().date() with time()
:param _local_tz:
:param _time:
:return:
"""
return pytz.timezone(_local_tz).localize(datetime.datetime.combine(
datetime.datetime.now().date(), _time))
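# Illustrative use: apply_timezone_datetime('Europe/Vienna', datetime.time(9, 30))
# returns today's date at 09:30 localized to the Europe/Vienna timezone.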
| null | null | [
0,
1,
2
] |
1,239 | a36a553342cfe605a97ddc0f636bbb73b683f6a6 | <mask token>
def match_regex(filename, regex):
with open(filename) as file:
lines = file.readlines()
for line in reversed(lines):
match = re.match(regex, line)
if match:
regex = yield match.groups()[0]
<mask token>
| <mask token>
def match_regex(filename, regex):
with open(filename) as file:
lines = file.readlines()
for line in reversed(lines):
match = re.match(regex, line)
if match:
regex = yield match.groups()[0]
def get_serials(filename):
ERROR_RE = 'XFS ERROR (\\[sd[a-z]\\])'
matcher = match_regex(filename, ERROR_RE)
device = next(matcher)
while True:
bus_regex = '(sd \\S+) {}.*'.format(re.escape(device))
print('bus_regex:', bus_regex)
bus = matcher.send(bus_regex)
serial_regex = '{} \\(SERIAL=([^)]*)\\)'.format(bus)
print('serial_regex:', serial_regex)
serial = matcher.send(serial_regex)
yield serial
device = matcher.send(ERROR_RE)
def main():
filename = 'iter2/log2.txt'
print('List of serial no found: ')
for serial in get_serials(filename=filename):
print(serial)
<mask token>
| <mask token>
def match_regex(filename, regex):
with open(filename) as file:
lines = file.readlines()
for line in reversed(lines):
match = re.match(regex, line)
if match:
regex = yield match.groups()[0]
def get_serials(filename):
ERROR_RE = 'XFS ERROR (\\[sd[a-z]\\])'
matcher = match_regex(filename, ERROR_RE)
device = next(matcher)
while True:
bus_regex = '(sd \\S+) {}.*'.format(re.escape(device))
print('bus_regex:', bus_regex)
bus = matcher.send(bus_regex)
serial_regex = '{} \\(SERIAL=([^)]*)\\)'.format(bus)
print('serial_regex:', serial_regex)
serial = matcher.send(serial_regex)
yield serial
device = matcher.send(ERROR_RE)
def main():
filename = 'iter2/log2.txt'
print('List of serial no found: ')
for serial in get_serials(filename=filename):
print(serial)
if __name__ == '__main__':
main()
| import re
def match_regex(filename, regex):
with open(filename) as file:
lines = file.readlines()
for line in reversed(lines):
match = re.match(regex, line)
if match:
regex = yield match.groups()[0]
def get_serials(filename):
ERROR_RE = 'XFS ERROR (\\[sd[a-z]\\])'
matcher = match_regex(filename, ERROR_RE)
device = next(matcher)
while True:
bus_regex = '(sd \\S+) {}.*'.format(re.escape(device))
print('bus_regex:', bus_regex)
bus = matcher.send(bus_regex)
serial_regex = '{} \\(SERIAL=([^)]*)\\)'.format(bus)
print('serial_regex:', serial_regex)
serial = matcher.send(serial_regex)
yield serial
device = matcher.send(ERROR_RE)
def main():
filename = 'iter2/log2.txt'
print('List of serial no found: ')
for serial in get_serials(filename=filename):
print(serial)
if __name__ == '__main__':
main()
| import re
def match_regex(filename, regex):
with open(filename) as file:
lines = file.readlines()
for line in reversed(lines):
match = re.match(regex, line)
if match:
regex = yield match.groups()[0]
def get_serials(filename):
ERROR_RE = 'XFS ERROR (\[sd[a-z]\])'
# Create generator of XFS ERROR
matcher = match_regex(filename, ERROR_RE)
device = next(matcher)
while True:
# Create regex pattern for BUS INFO base on DEVICE got ERROR
bus_regex = '(sd \S+) {}.*'.format(re.escape(device))
print('bus_regex:', bus_regex)
# Send BUS regex to generator to get BUS info of ERROR
bus = matcher.send(bus_regex)
# Send SERIAL regex to generator to get SERIAL NO of DEVICE in ERROR
serial_regex = '{} \(SERIAL=([^)]*)\)'.format(bus)
print('serial_regex:', serial_regex)
serial = matcher.send(serial_regex)
yield serial
# Send ERROR regex to generator to get next DEVICE in ERROR
device = matcher.send(ERROR_RE)
def main():
filename = 'iter2/log2.txt'
print('List of serial no found: ')
for serial in get_serials(filename=filename):
print(serial)
if __name__ == '__main__':
main() | [
1,
3,
4,
5,
6
] |
1,240 | 1ed7fb0dd5f0fa5e60c855eceaaf3259092918ef | <mask token>
| <mask token>
print(x * y)
| x, y = [float(x) for x in raw_input().split(' ')]
print(x * y)
| x, y = [float(x) for x in raw_input().split(" ")]
print(x*y) | null | [
0,
1,
2,
3
] |
1,241 | b1a1287c2c3b624eb02f2955760f6e9eca8cdcf9 | cars=100
drivers=30
passengers=70
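# Python 2-style script: "/" below performs integer (floor) division.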
print "There are",cars,"cars available."
print "There are only",drivers,"drivers available."
print "Each driver needs to drive",passengers/drivers-1,"passengers."
| null | null | null | null | [
0
] |
1,242 | e68d872232b3eab4c33cbbe4376be7dd788888e2 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Child', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,
'0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'
)], default=-1)), ('sex', models.IntegerField(choices=[(1,
'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('content', models.TextField(
)), ('creation_date', models.DateTimeField(default=django.utils.
timezone.now)), ('is_read', models.BooleanField(default=False)), (
'receiver', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='message_receiver', to=settings.
AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db
.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),
migrations.CreateModel(name='Parent', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('children', models.ManyToManyField(to=
'placyk_app.Child'))]), migrations.CreateModel(name='Pground',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('place', models.CharField(
max_length=128)), ('description', models.TextField())]), migrations
.CreateModel(name='Quarter', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('name', models.CharField(choices=[('not defined', 0), (
'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',
max_length=64))]), migrations.CreateModel(name='Visit', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('time_from', models.
DateTimeField()), ('time_to', models.DateTimeField()), ('pground',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.
db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),
migrations.AddField(model_name='pground', name='quarter', field=
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'placyk_app.Quarter')), migrations.AddField(model_name='parent',
name='quarter', field=models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(
model_name='parent', name='user', field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to=settings.
AUTH_USER_MODEL))]
| from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Child', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,
'0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'
)], default=-1)), ('sex', models.IntegerField(choices=[(1,
'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('content', models.TextField(
)), ('creation_date', models.DateTimeField(default=django.utils.
timezone.now)), ('is_read', models.BooleanField(default=False)), (
'receiver', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='message_receiver', to=settings.
AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db
.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),
migrations.CreateModel(name='Parent', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('children', models.ManyToManyField(to=
'placyk_app.Child'))]), migrations.CreateModel(name='Pground',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('place', models.CharField(
max_length=128)), ('description', models.TextField())]), migrations
.CreateModel(name='Quarter', fields=[('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('name', models.CharField(choices=[('not defined', 0), (
'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',
max_length=64))]), migrations.CreateModel(name='Visit', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('time_from', models.
DateTimeField()), ('time_to', models.DateTimeField()), ('pground',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.
db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),
migrations.AddField(model_name='pground', name='quarter', field=
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'placyk_app.Quarter')), migrations.AddField(model_name='parent',
name='quarter', field=models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(
model_name='parent', name='user', field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to=settings.
AUTH_USER_MODEL))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-20 08:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('age', models.IntegerField(choices=[(-1, 'not defined'), (0, '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6')], default=-1)),
('sex', models.IntegerField(choices=[(1, 'dziewczynka'), (2, 'chłopiec')], default=1)),
('whose_child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('is_read', models.BooleanField(default=False)),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Parent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('children', models.ManyToManyField(to='placyk_app.Child')),
],
),
migrations.CreateModel(
name='Pground',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.CharField(max_length=128)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Quarter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('not defined', 0), ('Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined', max_length=64)),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_from', models.DateTimeField()),
('time_to', models.DateTimeField()),
('pground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Pground')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='pground',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
0,
1,
2,
3,
4
] |
1,243 | df39a97db25f03aca8ebd501283fd6a7c486db8c | <mask token>
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
| <mask token>
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
| <mask token>
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(User, related_name='project_ts_member',
limit_choices_to={'is_staff': True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
| from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(User, related_name='project_ts_member',
limit_choices_to={'is_staff': True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
| from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
class ProjectTS(models.Model):
class Meta:
permissions = (
("approve_project_ts", "Can approve timesheet"),
)
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(
User, related_name='project_ts_member',
limit_choices_to={'is_staff' : True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default="")
project_time_sheet = models.ForeignKey(ProjectTS, related_name="project_time_sheet")
project_leader = models.ForeignKey(User, related_name="pl",
limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default="")
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
| [
3,
4,
5,
6,
7
] |
1,244 | f80de2b069cf1dee2e665556262c6e84ce04b208 | <mask token>
class MainViewNode(NodeBase):
<mask token>
def Label(self):
return 'Main View'
<mask token>
class DockSectionNode(NodeBase):
def __init__(self, label, icon, contents, settings):
NodeBase.__init__(self, icon)
self.label = label
self.contents = contents
self.settings = settings
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self
workspace.SetupDockSectionControls()
class ToolbarNode(NodeBase):
def __init__(self, toolbar):
NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
self.toolbar = toolbar
def Label(self):
return 'Toolbar'
def GetButtons(self):
return self.toolbar.buttons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.toolbar.quickOpen
workspacePanel.SetupToolbarControls()
class ButtonNode(NodeBase):
def __init__(self, button, isDockPartNode):
NodeBase.__init__(self, 'TradeEntryApp')
self.button = button
self.isDockPartNode = isDockPartNode
def Label(self):
label = self.button.HasField('label') and self.button.label.encode(
'utf-8')
return label or '<Buttons>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.button
workspacePanel.SetupButtonControls(self.isDockPartNode)
| <mask token>
class DashboardTabNode(NodeBase):
<mask token>
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Dashboard>'
def Contents(self):
contents = AppWorkspace.DashboardContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.DashboardSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardTabControls()
class WorkbenchTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'Layout')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Workbench>'
def Contents(self):
contents = AppWorkspace.WorkbenchContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.WorkbenchSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.tabContent
workspacePanel.SetupWorkbenchTabControls()
class DashboardPartNode(NodeBase):
def __init__(self, part, settings, label=None):
NodeBase.__init__(self, 'FExtension')
self.part = part
self.settings = settings
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardPartControls()
class DockPartNode(NodeBase):
def __init__(self, part):
NodeBase.__init__(self, 'FExtension')
self.part = part
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def GetButtons(self):
return self.part.selectionActionButtons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.part
workspacePanel.SetupDockSectionPartControls()
class MainViewNode(NodeBase):
def __init__(self, view):
NodeBase.__init__(self, 'DisplayTabs')
self.view = view
def Label(self):
return 'Main View'
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self.view
workspace.SetupMainViewControls()
class DockSectionNode(NodeBase):
def __init__(self, label, icon, contents, settings):
NodeBase.__init__(self, icon)
self.label = label
self.contents = contents
self.settings = settings
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self
workspace.SetupDockSectionControls()
class ToolbarNode(NodeBase):
def __init__(self, toolbar):
NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
self.toolbar = toolbar
def Label(self):
return 'Toolbar'
def GetButtons(self):
return self.toolbar.buttons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.toolbar.quickOpen
workspacePanel.SetupToolbarControls()
class ButtonNode(NodeBase):
def __init__(self, button, isDockPartNode):
NodeBase.__init__(self, 'TradeEntryApp')
self.button = button
self.isDockPartNode = isDockPartNode
def Label(self):
label = self.button.HasField('label') and self.button.label.encode(
'utf-8')
return label or '<Buttons>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.button
workspacePanel.SetupButtonControls(self.isDockPartNode)
| <mask token>
class DashboardTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'WindowSwitch')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Dashboard>'
def Contents(self):
contents = AppWorkspace.DashboardContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.DashboardSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardTabControls()
class WorkbenchTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'Layout')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Workbench>'
def Contents(self):
contents = AppWorkspace.WorkbenchContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.WorkbenchSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.tabContent
workspacePanel.SetupWorkbenchTabControls()
class DashboardPartNode(NodeBase):
def __init__(self, part, settings, label=None):
NodeBase.__init__(self, 'FExtension')
self.part = part
self.settings = settings
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardPartControls()
class DockPartNode(NodeBase):
def __init__(self, part):
NodeBase.__init__(self, 'FExtension')
self.part = part
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def GetButtons(self):
return self.part.selectionActionButtons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.part
workspacePanel.SetupDockSectionPartControls()
class MainViewNode(NodeBase):
def __init__(self, view):
NodeBase.__init__(self, 'DisplayTabs')
self.view = view
def Label(self):
return 'Main View'
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self.view
workspace.SetupMainViewControls()
class DockSectionNode(NodeBase):
def __init__(self, label, icon, contents, settings):
NodeBase.__init__(self, icon)
self.label = label
self.contents = contents
self.settings = settings
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self
workspace.SetupDockSectionControls()
class ToolbarNode(NodeBase):
def __init__(self, toolbar):
NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
self.toolbar = toolbar
def Label(self):
return 'Toolbar'
def GetButtons(self):
return self.toolbar.buttons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.toolbar.quickOpen
workspacePanel.SetupToolbarControls()
class ButtonNode(NodeBase):
def __init__(self, button, isDockPartNode):
NodeBase.__init__(self, 'TradeEntryApp')
self.button = button
self.isDockPartNode = isDockPartNode
def Label(self):
label = self.button.HasField('label') and self.button.label.encode(
'utf-8')
return label or '<Buttons>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.button
workspacePanel.SetupButtonControls(self.isDockPartNode)
| <mask token>
class NodeBase:
<mask token>
def Label(self):
raise NotImplementedError('Label')
<mask token>
class WorkspaceNode(NodeBase):
def __init__(self, workspace, label):
NodeBase.__init__(self, 'FWorkspace')
self.contents = workspace
self.label = label
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupWorkspaceControls()
class DashboardTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'WindowSwitch')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Dashboard>'
def Contents(self):
contents = AppWorkspace.DashboardContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.DashboardSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardTabControls()
class WorkbenchTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'Layout')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Workbench>'
def Contents(self):
contents = AppWorkspace.WorkbenchContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.WorkbenchSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.tabContent
workspacePanel.SetupWorkbenchTabControls()
class DashboardPartNode(NodeBase):
def __init__(self, part, settings, label=None):
NodeBase.__init__(self, 'FExtension')
self.part = part
self.settings = settings
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardPartControls()
class DockPartNode(NodeBase):
def __init__(self, part):
NodeBase.__init__(self, 'FExtension')
self.part = part
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption'
) and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def GetButtons(self):
return self.part.selectionActionButtons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.part
workspacePanel.SetupDockSectionPartControls()
class MainViewNode(NodeBase):
def __init__(self, view):
NodeBase.__init__(self, 'DisplayTabs')
self.view = view
def Label(self):
return 'Main View'
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self.view
workspace.SetupMainViewControls()
class DockSectionNode(NodeBase):
def __init__(self, label, icon, contents, settings):
NodeBase.__init__(self, icon)
self.label = label
self.contents = contents
self.settings = settings
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self
workspace.SetupDockSectionControls()
class ToolbarNode(NodeBase):
def __init__(self, toolbar):
NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
self.toolbar = toolbar
def Label(self):
return 'Toolbar'
def GetButtons(self):
return self.toolbar.buttons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.toolbar.quickOpen
workspacePanel.SetupToolbarControls()
class ButtonNode(NodeBase):
def __init__(self, button, isDockPartNode):
NodeBase.__init__(self, 'TradeEntryApp')
self.button = button
self.isDockPartNode = isDockPartNode
def Label(self):
label = self.button.HasField('label') and self.button.label.encode(
'utf-8')
return label or '<Buttons>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.button
workspacePanel.SetupButtonControls(self.isDockPartNode)
| """ Compiled: 2020-09-18 10:38:52 """
#__src_file__ = "extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py"
""" Compiled: 2018-06-07 17:06:19 """
#__src_file__ = "extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py"
import acm
import FUxCore
import Contracts_AppConfig_Messages_AppWorkspace as AppWorkspace
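# Tree-node wrappers for the workspace designer: each node supplies an icon and
# label and, on selection, tells the workspace panel which controls to show.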
class NodeBase():
def __init__(self, icon=''):
self.icon = icon
def Label(self):
raise NotImplementedError('Label')
def Icon(self):
return self.icon
class WorkspaceNode(NodeBase):
def __init__(self, workspace, label):
NodeBase.__init__(self, 'FWorkspace')
self.contents = workspace
self.label = label
def Label(self):
return self.label
def OnSelection(self, treePanel):
# TODO Don't navigate to siblings, go through parent
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupWorkspaceControls()
class DashboardTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'WindowSwitch')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Dashboard>'
def Contents(self):
contents = AppWorkspace.DashboardContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.DashboardSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardTabControls()
class WorkbenchTabNode(NodeBase):
def __init__(self, tabContent):
NodeBase.__init__(self, 'Layout')
self.tabContent = tabContent
self.contents = self.Contents()
self.userSettings = self.Settings()
def Label(self):
label = self.tabContent.caption.encode('utf-8')
return label or '<Workbench>'
def Contents(self):
contents = AppWorkspace.WorkbenchContent()
contents.ParseFromString(self.tabContent.contents)
return contents
def Settings(self):
userSettings = AppWorkspace.WorkbenchSettings()
userSettings.ParseFromString(self.tabContent.userSettings)
return userSettings
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.tabContent
workspacePanel.SetupWorkbenchTabControls()
class DashboardPartNode(NodeBase):
def __init__(self, part, settings, label=None):
NodeBase.__init__(self, 'FExtension')
self.part = part
self.settings = settings
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption') and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self
workspacePanel.SetupDashboardPartControls()
class DockPartNode(NodeBase):
def __init__(self, part):
NodeBase.__init__(self, 'FExtension')
self.part = part
def Label(self):
v = self.part.view
label = v.caption if v.HasField('caption') and v.caption else v.viewName
return label.encode('utf-8') or '<Part>'
def GetButtons(self):
return self.part.selectionActionButtons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.part
workspacePanel.SetupDockSectionPartControls()
class MainViewNode(NodeBase):
def __init__(self, view):
NodeBase.__init__(self, 'DisplayTabs')
self.view = view
def Label(self):
return 'Main View'
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self.view
workspace.SetupMainViewControls()
class DockSectionNode(NodeBase):
def __init__(self, label, icon, contents, settings):
NodeBase.__init__(self, icon)
self.label = label
self.contents = contents
self.settings = settings
def Label(self):
return self.label
def OnSelection(self, treePanel):
workspace = treePanel.parent.workspacePanel
workspace.nodeData = self
workspace.SetupDockSectionControls()
class ToolbarNode(NodeBase):
def __init__(self, toolbar):
NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
self.toolbar = toolbar
def Label(self):
return 'Toolbar'
def GetButtons(self):
return self.toolbar.buttons
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.toolbar.quickOpen
workspacePanel.SetupToolbarControls()
class ButtonNode(NodeBase):
def __init__(self, button, isDockPartNode):
NodeBase.__init__(self, 'TradeEntryApp')
self.button = button
self.isDockPartNode = isDockPartNode
def Label(self):
label = self.button.HasField('label') and \
self.button.label.encode('utf-8')
return label or '<Buttons>'
def OnSelection(self, treePanel):
workspacePanel = treePanel.parent.workspacePanel
workspacePanel.nodeData = self.button
workspacePanel.SetupButtonControls(self.isDockPartNode) | [
15,
37,
38,
44,
48
] |
1,245 | b6df9414f99294c7986d3eb5332d40288f059cd1 | class default_locations:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| class default_locations:
mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'
data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'
mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'
data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'
mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'
data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'
<mask token>
| class default_locations:
mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'
data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'
mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'
data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'
mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'
data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'
<mask token>
if os.environ['HOSTNAME'].startswith('clip'):
default_locations.mc_2016_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2016_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.mc_2017_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2017_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.mc_2018_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2018_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
| class default_locations:
mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'
data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'
mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'
data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'
mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'
mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'
data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'
data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'
import os
if os.environ['HOSTNAME'].startswith('clip'):
default_locations.mc_2016_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2016_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.mc_2017_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2017_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.mc_2018_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
default_locations.data_2018_data_directory = (
'/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')
| class default_locations:
mc_2016_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2016_postProcessing_directory = "stops_2016_nano_v0p23/dilep/"
data_2016_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2016_postProcessing_directory = "stops_2016_nano_v0p19/dilep/"
mc_2017_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2017_postProcessing_directory = "stops_2017_nano_v0p23/dilep/"
data_2017_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2017_postProcessing_directory = "stops_2017_nano_v0p19/dilep/"
mc_2018_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2018_postProcessing_directory = "stops_2018_nano_v0p23/dilep/"
data_2018_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2018_postProcessing_directory = "stops_2018_nano_v0p19/dilep/"
import os
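# Override the /afs input locations when running on hosts whose name starts with 'clip'.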
if os.environ['HOSTNAME'].startswith('clip'):
default_locations.mc_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.mc_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.mc_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
| [
1,
2,
3,
4,
5
] |
1,246 | 122c4f3a2949ee675b7dd64b9f9828e80cbe5610 | <mask token>
class TestData:
<mask token>
<mask token>
| <mask token>
class TestData:
<mask token>
def get_test_data(self):
return self.images
| <mask token>
class TestData:
def __init__(self, image_path='../../data/test_images/'):
test_names = os.listdir(image_path)
self.images = []
self.numbers = []
self.treshold = 0.25
for name in test_names:
self.images.append(cv2.imread(image_path + name))
self.numbers.append(int(re.sub('[^0-9]', '', name)))
def get_test_data(self):
return self.images
| import cv2
import os
import re
class TestData:
def __init__(self, image_path='../../data/test_images/'):
test_names = os.listdir(image_path)
self.images = []
self.numbers = []
self.treshold = 0.25
for name in test_names:
self.images.append(cv2.imread(image_path + name))
self.numbers.append(int(re.sub('[^0-9]', '', name)))
def get_test_data(self):
return self.images
| import cv2
import os
import re
class TestData:
def __init__(self, image_path= '../../data/test_images/'):
test_names = os.listdir(image_path)
self.images = []
self.numbers = []
self.treshold = .25
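        # The digits embedded in each file name are parsed out and stored as that image's number.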
for name in test_names:
self.images.append(cv2.imread(image_path + name))
self.numbers.append(int(re.sub("[^0-9]", "", name)))
def get_test_data(self):
return self.images
| [
1,
2,
3,
4,
5
] |
1,247 | 23236cd8262eb414666db88215c01d973abf1d97 | <mask token>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
<mask token>
| <mask token>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(char):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
<mask token>
| <mask token>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(char):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == '__main__':
print(
'By default the program will open UserCustom.ini which should be in the directory as the program.'
)
user_input = str(input(
'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')
)
const = '+CVars='
config = open('UserCustom.ini', 'r')
out_file = open('UserCustom.ini.out', 'w')
out_value = ''
lines = config.readlines()
for i in range(0, len(lines)):
if lines[i].startswith(const):
value = lines[i].split(const)[-1].split('\n')[0]
if user_input.lower() == 'encode' or user_input.lower() == 'e':
out_value = encode(value)
elif user_input.lower() == 'decode' or user_input.lower() == 'd':
out_value = decode(value)
out_file.write(const + out_value + '\n')
else:
out_file.write(lines[i])
out_file.close()
config.close()
pass
| decoded = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e',
'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', '=', '.']
encoded = ['49', '48', '4B', '4A', '4D', '4C', '4F', '4E', '41', '40', '38',
'3B', '3A', '3D', '3C', '3F', '3E', '31', '30', '33', '32', '35', '34',
'37', '36', '29', '28', '2B', '2A', '2D', '2C', '2F', '2E', '21', '20',
'23', '18', '1B', '1A', '1D', '1C', '1F', '1E', '11', '10', '13', '12',
'15', '14', '17', '16', '09', '08', '0B', '0A', '0D', '0C', '0F', '0E',
'01', '00', '03', '44', '57']
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == '__main__':
print(
'By default the program will open UserCustom.ini which should be in the directory as the program.'
)
user_input = str(input(
'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')
)
const = '+CVars='
config = open('UserCustom.ini', 'r')
out_file = open('UserCustom.ini.out', 'w')
out_value = ''
lines = config.readlines()
for i in range(0, len(lines)):
if lines[i].startswith(const):
value = lines[i].split(const)[-1].split('\n')[0]
if user_input.lower() == 'encode' or user_input.lower() == 'e':
out_value = encode(value)
elif user_input.lower() == 'decode' or user_input.lower() == 'd':
out_value = decode(value)
out_file.write(const + out_value + '\n')
else:
out_file.write(lines[i])
out_file.close()
config.close()
pass
| decoded = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "=", "."]
encoded = ["49", "48", "4B", "4A", "4D", "4C", "4F", "4E", "41", "40", "38", "3B", "3A", "3D", "3C", "3F", "3E", "31", "30", "33", "32", "35", "34", "37", "36", "29", "28", "2B", "2A", "2D", "2C", "2F", "2E", "21", "20", "23", "18", "1B", "1A", "1D", "1C", "1F", "1E", "11", "10", "13", "12", "15", "14", "17", "16", "09", "08", "0B", "0A", "0D", "0C", "0F", "0E", "01", "00", "03", "44", "57"]
def decode(value) :
out_value = ""
char = [value[i:i+2] for i in range(0, len(value), 2)]
for i in range(0, len(char)) :
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value) :
out_value = ""
char = [value[i:i+1] for i in range(0, len(value))]
for i in range(0, len(char)) :
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == "__main__" :
print("By default the program will open UserCustom.ini which should be in the directory as the program.")
user_input = str(input("Would you like to encode or decode UserCustom.ini ? (encode/decode) "))
const = "+CVars="
config = open("UserCustom.ini" , "r")
out_file = open("UserCustom.ini.out", "w")
out_value = ""
lines = config.readlines()
for i in range(0, len(lines)) :
if lines[i].startswith(const) :
value = lines[i].split(const)[-1].split("\n")[0]
if user_input.lower() == "encode" or user_input.lower() == "e" :
out_value = encode(value)
elif user_input.lower() == "decode" or user_input.lower() == "d" :
out_value = decode(value)
out_file.write(const + out_value + "\n")
else :
out_file.write(lines[i])
out_file.close()
config.close()
pass | [
1,
2,
3,
4,
5
] |
1,248 | 7aa6bba8483082354a94ed5c465e59a0fc97fe23 | <mask token>
| <mask token>
for i in range(n):
if r[i] - b[i] == 1:
x += 1
elif r[i] - b[i] == -1:
y += 1
if x == 0:
print(-1)
else:
print(y // x + min(y % x + 1, 1))
| n = int(input())
r = list(map(int, input().split()))
b = list(map(int, input().split()))
l = [0] * n
x = 0
y = 0
for i in range(n):
if r[i] - b[i] == 1:
x += 1
elif r[i] - b[i] == -1:
y += 1
if x == 0:
print(-1)
else:
print(y // x + min(y % x + 1, 1))
| #https://codeforces.com/problemset/problem/1321/A
n=int(input())
r=list(map(int,input().split()))
b=list(map(int,input().split()))
l=[0]*n
x=0
y=0
for i in range(n):
if r[i]-b[i]==1:
x+=1
elif r[i]-b[i]==-1:
y+=1
if x==0:
print(-1)
else:
print(y//x+min(y%x+1,1))
| null | [
0,
1,
2,
3
] |
1,249 | 4c9f2b6fd119daa58b7f1dd7153c90df747e62cb | <mask token>
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
<mask token>
| <mask token>
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
<mask token>
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
| <mask token>
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods
=ndays - 1).mean(), name='EWMA')
data = data.join(EMA)
return data
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
| import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods
=ndays - 1).mean(), name='EWMA')
data = data.join(EMA)
return data
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
| # Moving Averages Code
# Load the necessary packages and modules
import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
# Simple Moving Average
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
# SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')
data = data.join(SMA)
return data
# Exponentially-weighted Moving Average
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'],
span=ndays,
min_periods=ndays - 1).mean(),
name='EWMA')
data = data.join(EMA)
return data
# Retrieve the Nifty data from Yahoo finance:
# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')
# close = XSHE000002_data['close']
#
# # Compute the 50-day SMA for NIFTY
# n = 50
# SMA_NIFTY = SMA(XSHE000002_data, n)
# SMA_NIFTY = SMA_NIFTY.dropna()
# SMA = SMA_NIFTY['SMA']
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
# Compute the 200-day EWMA for NIFTY
# ew = 200
# EWMA_NIFTY = EWMA(XSHE000002_data, ew)
# EWMA_NIFTY = EWMA_NIFTY.dropna()
# EWMA = EWMA_NIFTY['EWMA_200']
# Plotting the NIFTY Price Series chart and Moving Averages below
# plt.figure(figsize=(9, 5))
# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')
# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')
# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')
# plt.legend(loc=2, prop={'size': 11})
# plt.grid(True)
# plt.setp(plt.gca().get_xticklabels(), rotation=30)
# plt.show()
| [
1,
3,
4,
5,
6
] |
1,250 | db55a603615c7d896569ada84f3110dd6c0ce45f | <mask token>
def executeUpgrade():
shell.executeCommand('pkg upgrade')
<mask token>
| <mask token>
def executeUpgrade():
shell.executeCommand('pkg upgrade')
<mask token>
def executeFindByName(name):
shell.executeCommand('pkg search ' + name)
| <mask token>
def executeUpgrade():
shell.executeCommand('pkg upgrade')
<mask token>
def executeRemove(pkg_name):
shell.executeCommand('pkg remove ' + pkg_name)
shell.executeCommand('pkg autoremove')
def executeFindByName(name):
shell.executeCommand('pkg search ' + name)
| <mask token>
def executeUpgrade():
shell.executeCommand('pkg upgrade')
def executeInstall(pkg_name):
shell.executeCommand('pkg install ' + pkg_name)
def executeRemove(pkg_name):
shell.executeCommand('pkg remove ' + pkg_name)
shell.executeCommand('pkg autoremove')
def executeFindByName(name):
shell.executeCommand('pkg search ' + name)
| import shell
def executeUpgrade():
shell.executeCommand('pkg upgrade')
def executeInstall(pkg_name):
shell.executeCommand('pkg install ' + pkg_name)
def executeRemove(pkg_name):
shell.executeCommand('pkg remove ' + pkg_name)
shell.executeCommand('pkg autoremove')
def executeFindByName(name):
shell.executeCommand('pkg search ' + name)
| [
1,
2,
3,
4,
5
] |
1,251 | 3078a0c7e2c711da88846ca3401c7924b1790dbc | #!/usr/bin/env python
#
# ConVirt - Copyright (c) 2008 Convirture Corp.
# ======
#
# ConVirt is a Virtualization management tool with a graphical user
# interface that allows for performing the standard set of VM operations
# (start, stop, pause, kill, shutdown, reboot, snapshot, etc...). It
# also attempts to simplify various aspects of VM lifecycle management.
#
#
# This software is subject to the GNU General Public License, Version 2 (GPLv2)
# and for details, please consult it at:
#
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
#
#
# author : Jd <[email protected]>
#
import paramiko
from paramiko import SSHException
import os, sys
import getpass
import socket
from convirt.core.utils.utils import to_unicode,to_str
"""Paramiko helper class. Provides common functions as
-- validating host keys,
-- initializing a new transport,
-- agent based and password based authentication etc.
"""
class HostValidationException(Exception):
def __init__(self, errno, description):
Exception.__init__(self, errno, description)
self.errno = errno
self.description = description
def __repr__(self):
return "[Error %s], %s" % (self.errno, self.description)
def __str__(self):
return self.__repr__()
class AuthenticationException(Exception):
def __init__(self, errno, description):
Exception.__init__(self, errno, description)
self.errno = errno
self.description = description
def __repr__(self):
return "[Error %s], %s" % (self.errno, self.description)
def __str__(self):
return self.__repr__()
class CommunicationException(Exception):
def __init__(self, errno, description):
Exception.__init__(self, errno, description)
self.errno = errno
self.description = description
def __repr__(self):
return "[Error %s], %s" % (self.errno, self.description)
def __str__(self):
return self.__repr__()
class PHelper:
host_keys = {}
# credential helper
credentials_helper = None
## The credendital helper needs to get_credentials(hostname) method
## to return credentials
## the object returned should:
## get_username() and get_password() methods
## This would be used when the transport can not be initialized
## using given methods
@classmethod
def set_credentials_helper(cls, cred_helper):
""" Set the helper class"""
cls.credentials_helper = cred_helper
@classmethod
def load_keys(cls):
# TODO : May be we need to load /etc/ssh/known_hosts and merge it here.
try:
path = os.path.expanduser('~/.ssh/known_hosts')
cls.host_keys = paramiko.util.load_host_keys(path)
except IOError:
try:
path = os.path.expanduser('~/ssh/known_hosts')
cls.host_keys = paramiko.util.load_host_keys(path)
except IOError:
pass
@classmethod
def init_log(cls,log_file_name):
try:
paramiko.util.log_to_file(log_file_name)
except Exception ,ex:
print "Error initializing paramiko log.", ex
@classmethod
def validate_host_key(cls, transport, hostname):
"""
get the remote host's key and validate it against the known host keys
throws HostValidationException with (errno, description):
1 - Host not found (currently only logs a warning)
2 - Host found but key not found (currently only logs a warning)
3 - Host found, key found, but keys do not match
(server changed/spoofed)
"""
# check server's host key -- this is important.
key = transport.get_remote_server_key()
if not PHelper.host_keys.has_key(hostname):
print "Warning : Host not found ! ", hostname
#raise HostValidationException(1, "Host not found")
elif not PHelper.host_keys[hostname].has_key(key.get_name()):
print "Warning: Key not found ! ", hostname
#raise HostValidationException(2, "Key not found.")
elif PHelper.host_keys[hostname][key.get_name()] != key:
raise HostValidationException(3, "Keys mismatch for " + hostname)
return True
## TODO : only for testing purpose
@classmethod
def interactive_auth(cls, transport, username, hostname):
default_auth = 'p'
auth = raw_input('Auth by (p)assword, (r)sa key, or (d)ss key? [%s] ' % default_auth)
if len(auth) == 0:
auth = default_auth
if auth == 'r':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
path = raw_input('RSA key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.RSAKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('RSA key password: ')
key = paramiko.RSAKey.from_private_key_file(path, password)
transport.auth_publickey(username, key)
elif auth == 'd':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_dsa')
path = raw_input('DSS key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.DSSKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('DSS key password: ')
key = paramiko.DSSKey.from_private_key_file(path, password)
transport.auth_publickey(username, key)
else:
pw = getpass.getpass('Password for %s@%s: ' % (username, hostname))
transport.auth_password(username, pw)
#TODO : refine this.. and test it with passphrase, may be catch
# some other exception, if passphrase is wrong.
@classmethod
def authenticate(cls, transport, authtype,
keyfile=None, passphrase=None,
username=None, password=None):
default_authtype = 'password'
if authtype==None or len(authtype) == 0:
authtype = default_authtype
try:
if authtype == 'rsa':
default_keyfile = os.path.join(os.environ['HOME'],
'.ssh', 'id_rsa')
if keyfile == None or len(keyfile) == 0:
keyfile = default_keyfile
key = paramiko.RSAKey.from_private_key_file(keyfile,
passphrase)
elif authtype == 'dsa':
default_keyfile = os.path.join(os.environ['HOME'],
'.ssh', 'id_dsa')
if keyfile == None or len(keyfile) == 0:
keyfile = default_keyfile
key = paramiko.DSSKey.from_private_key_file(keyfile,
passphrase)
if authtype == 'rsa' or authtype == 'dsa':
transport.auth_publickey(username, key)
else:
transport.auth_password(username, password)
except paramiko.PasswordRequiredException, ex:
raise AuthenticationException(1, "Password required")
except paramiko.BadAuthenticationType, ex:
raise AuthenticationException(2, "Bad authentication type")
except paramiko.AuthenticationException, ex:
raise AuthenticationException(3, "Authentication failed.")
except paramiko.SSHException ,ex:
raise AuthenticationException(4,
"Invalid key file %s" % keyfile)
@classmethod
def agent_auth(cls, transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
return True, if the transport is authenticated
raises: CommunicationException if talking to the SSH agent fails
"""
agent = paramiko.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
#print "Warning: No keys found loaded in ssh-agent. Forgot to use ssh-add ?"
return
for key in agent_keys:
#print 'Trying ssh-agent key %s' % \
# paramiko.util.hexify(key.get_fingerprint()),
try:
transport.auth_publickey(username, key)
if not transport.is_authenticated():
continue
else:
break
except paramiko.AuthenticationException, e:
print "Used key from agent. Auth failed. Will skip it."
pass
except SSHException, ex:
raise CommunicationException(0, "[agent_auth]:" + to_str(ex))
@classmethod
def init_ssh_transport(cls, hostname, ssh_port=22,
authtype=None, keyfile=None,passphrase=None,
username=None, password=None):
try:
### Open SSH transport
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#
#TODO timeout value should be configurable from server pool
#
sock.settimeout(1)
sock.connect((to_str(hostname), ssh_port))
transport = paramiko.Transport(sock)
transport.start_client()
# validate the host key
cls.validate_host_key(transport, hostname)
# if username and password provided assume it is password
# type authentication
if not transport.is_authenticated() and authtype == None:
if username != None and password != None:
try:
cls.authenticate(transport,'password',
keyfile,passphrase,
username, password)
except AuthenticationException ,ex:
if ex.errno == 3 and cls.credentials_helper is not None:
# give a chance to cred helper to prompt
pass
else:
transport.close()
raise
## authenticate with the auth type provided.
if not transport.is_authenticated() and authtype != None:
try:
if authtype == "agent":
cls.agent_auth(transport, username)
if not transport.is_authenticated():
raise AuthenticationException(0,"Agent authentication failed")
else:
cls.authenticate(transport,authtype, keyfile,passphrase,
username, password)
except AuthenticationException ,ex:
if authtype == 'password' and \
ex.errno == 3 and \
cls.credentials_helper is not None:
# give a chance to cred helper to prompt
pass
else:
transport.close()
raise
# authenticate interactive way. just for testing
#if not transport.is_authenticated():
# cls.interactive_auth(transport, username, hostname)
if not transport.is_authenticated() and \
cls.credentials_helper is not None:
creds = cls.credentials_helper.get_credentials(hostname)
if creds is not None:
username = creds.get_username()
password = creds.get_password()
cls.authenticate(transport,'password',
keyfile,passphrase,
username, password)
if not transport.is_authenticated():
transport.close()
raise AuthenticationException("0",
hostname + " is not authenticated")
return transport
except socket.timeout : # clients may wish to treat this differently
raise
except socket.error, ex:
raise CommunicationException(0, to_str(ex))
## pass through method
@classmethod
def open_channel(cls,transport, kind, dest_addr=None, src_addr=None):
try:
ch = transport.open_channel(kind, dest_addr, src_addr)
except SSHException, ex:
raise CommunicationException(0, "[open_channel]" +to_str(ex))
return ch
# initialize key store
PHelper.load_keys()
#TODO : Add some test cases here.
if __name__ == "__main__":
host = "192.168.12.100"
#Test with passphrase
## t = PHelper.init_ssh_transport(host,
## authtype="rsa", passphrase="welcome",
## username = "root")
# Test with ssh-agent
t = PHelper.init_ssh_transport(host, username="root", authtype="agent")
ch = PHelper.open_channel(t, "session")
ch.close()
| null | null | null | null | [
0
] |
1,252 | 64fd597918fe8133d53d1df741512cd2e49a111d | <mask token>
class Ticket(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
<mask token>
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
| <mask token>
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
| <mask token>
class User(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Device(models.Model):
TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (
'30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (
'70', 'Маршрутизатор'), ('80', 'Модем')
type = models.CharField(max_length=3, choices=TYPE_CHOICES)
inv_no = models.CharField(max_length=40)
ip = models.IPAddressField(blank=True, null=True)
model = models.CharField(max_length=60, blank=True, null=True)
mac = custom_fields.MACAddressField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
place = models.ForeignKey('Place')
hostname = models.CharField(blank=True, null=True, max_length=40)
def type_display(self):
for desc in self.TYPE_CHOICES:
if desc[0] == self.type:
return desc[1]
def get_absolute_url(self):
return '/place/' + str(self.place.id)
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
| <mask token>
class Message(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class User(models.Model):
name = models.CharField('Имя', max_length=60)
email = models.EmailField(blank=True, null=True)
phone = models.CharField('Внутр. телефон', max_length=30, blank=True,
null=True)
mobile = models.CharField('Корп. мобильный', max_length=30, blank=True,
null=True)
city_phone = models.CharField('Городской телефон', max_length=30, blank
=True, null=True)
sat_phone = models.CharField('Спутниковый телефон', max_length=30,
blank=True, null=True)
personal_phone = models.CharField('Личный телефон', max_length=30,
blank=True, null=True)
admin = models.BooleanField(default=False)
login = models.CharField(max_length=16, blank=True, null=True)
password = models.CharField(max_length=32, blank=True, null=True)
place = models.ForeignKey('Place', blank=True, null=True)
class Device(models.Model):
TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (
'30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (
'70', 'Маршрутизатор'), ('80', 'Модем')
type = models.CharField(max_length=3, choices=TYPE_CHOICES)
inv_no = models.CharField(max_length=40)
ip = models.IPAddressField(blank=True, null=True)
model = models.CharField(max_length=60, blank=True, null=True)
mac = custom_fields.MACAddressField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
place = models.ForeignKey('Place')
hostname = models.CharField(blank=True, null=True, max_length=40)
def type_display(self):
for desc in self.TYPE_CHOICES:
if desc[0] == self.type:
return desc[1]
def get_absolute_url(self):
return '/place/' + str(self.place.id)
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
| # -*- coding: utf8 -*-
from django.db import models
import custom_fields
import datetime
#import mptt
# Create your models here.
class Message(models.Model):
user = models.ForeignKey('User')
time = models.DateTimeField(auto_now=True,auto_now_add=True)
text = models.TextField()
#True if this is a reply from support
reply = models.BooleanField(default=False)
ticket = models.ForeignKey('Ticket')
ip = models.IPAddressField(blank=True,null=True)
class User(models.Model):
name = models.CharField("Имя",max_length=60)
email = models.EmailField(blank=True,null=True)
phone = models.CharField("Внутр. телефон",max_length=30,blank=True,null=True)
mobile = models.CharField("Корп. мобильный",max_length=30,blank=True,null=True)
city_phone = models.CharField("Городской телефон",max_length=30,blank=True,null=True)
sat_phone = models.CharField("Спутниковый телефон",max_length=30,blank=True,null=True)
personal_phone = models.CharField("Личный телефон",max_length=30,blank=True,null=True)
admin = models.BooleanField(default=False)
login = models.CharField(max_length=16,blank=True,null=True)
password = models.CharField(max_length=32,blank=True,null=True)
place = models.ForeignKey('Place',blank=True,null=True)
class Device(models.Model):
TYPE_CHOICES=(
('00','Компьютер'),
('10','Монитор'),
('20','Принтер'),
('30','МФУ'),
('40','Плоттер'),
('50','Сканер'),
('60','Сервер'),
('70','Маршрутизатор'),
('80','Модем'),
)
type=models.CharField(max_length=3,choices=TYPE_CHOICES)
inv_no=models.CharField(max_length=40)
ip=models.IPAddressField(blank=True,null=True)
model=models.CharField(max_length=60,blank=True,null=True)
mac=custom_fields.MACAddressField(blank=True,null=True)
info=models.TextField(blank=True,null=True)
place = models.ForeignKey('Place')
hostname=models.CharField(blank=True,null=True,max_length=40)
def type_display(self):
for desc in self.TYPE_CHOICES:
if desc[0]==self.type:
return desc[1]
def get_absolute_url(self):
return "/place/"+str(self.place.id)
class Ticket(models.Model):
#NEW,OPEN,CLOSED,DELETED
STATUS_CHOICES=(
('00','Новое'),
('10','Принято'),
('20','Ожидаем ответ'),
('30','Закрыто'),
('40','Удалено'),
)
PRIO_CHOICES=(
('00','Крайне срочно'),
('10','Срочно'),
('20','Обычно'),
('30','Длительное')
)
CATEGORY_CHOICES=(
('00','Компьютеры, локальный софт, железо'),
('10','Печать, принтеры, расходники'),
('20','Корпоративные системы (SAP,АСУД ..)'),
('30','Сетевые сервисы и оборуд., Серверы'),
('40','СКС (провода, розетки)'),
)
status = models.CharField("Статус",max_length=3, choices=STATUS_CHOICES)
priority = models.CharField("Приоритет",max_length=3, choices=PRIO_CHOICES)
category = models.CharField("Категория",max_length=3, choices=CATEGORY_CHOICES,blank=True,null=True)
hours_limit=models.DecimalField("Лимит времени, ч.",max_digits=4, decimal_places=1,default=2)
#Problem description. When the ticket is created, it is set to the text of the first message
#It can be changed while the ticket is being worked on
description = models.TextField("Описание проблемы")
#Description of the solution, filled in when the ticket is closed
resume = models.TextField("Отчёт о решении",blank=True,null=True)
user = models.ForeignKey(User,related_name="tickets")
admin = models.ForeignKey(User,related_name="tasks",blank=True,null=True)
device = models.ForeignKey(Device,blank=True,null=True)
#Creation time.
ctime = models.DateTimeField(auto_now_add = True)
#Closing time
closing_time = models.DateTimeField(blank=True,null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta=datetime.datetime.now()-self.ctime
return round(delta.days*24.0+delta.seconds/3600.0,1)
def is_new(self,*args):
value=self.status
if args:
value=args[0]
if value=='00':
return True
else:
return False
def is_closed(self,*args):
value=self.status
if args:
value=args[0]
if value=='30':
return True
else:
return False
def accept_by(self,user):
self.admin=user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self',null=True, blank=True )
address = models.CharField(max_length=70)
LEVEL_DESC=(
(1,"Населённый пункт"),
(2,"Территория, группа зданий"),
(3,"Здание"),
(4,"Этаж"),
(5,"Кабинет/помещение"),
(6,"Место/комплекс"),
)
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res=0
try:
if self.parent!=None:
o=self
while (o.parent !=None):
res+=1
o=o.parent
except:
None
return res
def level_display(self):
level=self.get_level()
for desc in self.LEVEL_DESC:
if desc[0]==level:
return desc[1]
def path(self):
path=[]
o=self
while (o.parent != None):
path.insert(0,o)
o=o.parent
path.insert(0,o)
return path
def get_absolute_url(self):
return '/place/'+str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
#mptt.register(Place)
class Document(models.Model):
name=models.CharField(max_length=60)
place=models.ForeignKey(Place,blank=True,null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document=models.ForeignKey(Document)
version=models.IntegerField()
file_name=models.CharField(max_length=60)
comment=models.CharField(max_length=90,blank=True,null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
| [
20,
22,
27,
29,
32
] |
1,253 | 05aec07b94f3363e07d8740b102262d817e08e71 | # coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class GamificationLeaderboardsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_leaderboard(self, context_type, context_id, **kwargs):
"""
Retrieves leaderboard details and paginated entries
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard(context_type, context_id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param int size: The number of objects returned per page
:param int page: The number of the page returned, starting with 1
:param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]
:return: LeaderboardResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)
else:
(data) = self.get_leaderboard_with_http_info(context_type, context_id, **kwargs)
return data
def get_leaderboard_with_http_info(self, context_type, context_id, **kwargs):
"""
Retrieves leaderboard details and paginated entries
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. Sorting is based on the fields of LeaderboardEntryResource rather than the returned LeaderboardResource. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_with_http_info(context_type, context_id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param int size: The number of objects returned per page
:param int page: The number of the page returned, starting with 1
:param str order: A comma separated list of sorting requirements in priority order, each entry matching PROPERTY_NAME:[ASC|DESC]
:return: LeaderboardResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_type', 'context_id', 'size', 'page', 'order']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_type' is set
if ('context_type' not in params) or (params['context_type'] is None):
raise ValueError("Missing the required parameter `context_type` when calling `get_leaderboard`")
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_leaderboard`")
collection_formats = {}
path_params = {}
if 'context_type' in params:
path_params['context_type'] = params['context_type']
if 'context_id' in params:
path_params['context_id'] = params['context_id']
query_params = []
if 'size' in params:
query_params.append(('size', params['size']))
if 'page' in params:
query_params.append(('page', params['page']))
if 'order' in params:
query_params.append(('order', params['order']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/{context_type}/{context_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LeaderboardResource',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_leaderboard_rank(self, context_type, context_id, id, **kwargs):
"""
Retrieves a specific user entry with rank
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_rank(context_type, context_id, id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param str id: The id of a user (required)
:return: LeaderboardEntryResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)
else:
(data) = self.get_leaderboard_rank_with_http_info(context_type, context_id, id, **kwargs)
return data
def get_leaderboard_rank_with_http_info(self, context_type, context_id, id, **kwargs):
"""
Retrieves a specific user entry with rank
The context type identifies the type of entity (i.e., 'activity') being tracked on the leaderboard. The context ID is the unique ID of the actual entity tracked by the leaderboard. <br><br><b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_rank_with_http_info(context_type, context_id, id, async=True)
>>> result = thread.get()
:param async bool
:param str context_type: The context type for the leaderboard (required)
:param str context_id: The context id for the leaderboard (required)
:param str id: The id of a user (required)
:return: LeaderboardEntryResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_type', 'context_id', 'id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard_rank" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_type' is set
if ('context_type' not in params) or (params['context_type'] is None):
raise ValueError("Missing the required parameter `context_type` when calling `get_leaderboard_rank`")
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_leaderboard_rank`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_leaderboard_rank`")
collection_formats = {}
path_params = {}
if 'context_type' in params:
path_params['context_type'] = params['context_type']
if 'context_id' in params:
path_params['context_id'] = params['context_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/{context_type}/{context_id}/users/{id}/rank', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LeaderboardEntryResource',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_leaderboard_strategies(self, **kwargs):
"""
Get a list of available leaderboard strategy names
<b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_strategies(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_leaderboard_strategies_with_http_info(**kwargs)
else:
(data) = self.get_leaderboard_strategies_with_http_info(**kwargs)
return data
def get_leaderboard_strategies_with_http_info(self, **kwargs):
"""
Get a list of available leaderboard strategy names
<b>Permissions Needed:</b> ANY
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_leaderboard_strategies_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: list[str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_leaderboard_strategies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['oauth2_client_credentials_grant', 'oauth2_password_grant']
return self.api_client.call_api('/leaderboards/strategies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[str]',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| null | null | null | null | [
0
] |
1,254 | dbb007af79b2da2b5474281759c2bcce2a836fb5 | <mask token>
def getLinks(url):
links = []
document = BeautifulSoup(response, 'html.parser')
for element in document.findAll('a', href=re.compile('.pdf$')):
links.append(element.get('href'))
return links
<mask token>
| <mask token>
def getLinks(url):
links = []
document = BeautifulSoup(response, 'html.parser')
for element in document.findAll('a', href=re.compile('.pdf$')):
links.append(element.get('href'))
return links
<mask token>
for link in pdf_links:
pdf_file = get(link)
files.append(pdf_file)
with Printer(linegap=1) as printer:
for pdf_file in files:
printer.text(pdf_file)
| <mask token>
def getLinks(url):
links = []
document = BeautifulSoup(response, 'html.parser')
for element in document.findAll('a', href=re.compile('.pdf$')):
links.append(element.get('href'))
return links
site = 'https://greenteapress.com/wp/think-python/'
http = httplib2.Http()
status, response = http.request(site)
pdf_links = getLinks(response)
files = []
for link in pdf_links:
pdf_file = get(link)
files.append(pdf_file)
with Printer(linegap=1) as printer:
for pdf_file in files:
printer.text(pdf_file)
| from requests import get
from bs4 import BeautifulSoup, SoupStrainer
import httplib2
import re
from win32printing import Printer
def getLinks(url):
links = []
document = BeautifulSoup(response, 'html.parser')
for element in document.findAll('a', href=re.compile('.pdf$')):
links.append(element.get('href'))
return links
site = 'https://greenteapress.com/wp/think-python/'
http = httplib2.Http()
status, response = http.request(site)
pdf_links = getLinks(response)
files = []
for link in pdf_links:
pdf_file = get(link)
files.append(pdf_file)
with Printer(linegap=1) as printer:
for pdf_file in files:
printer.text(pdf_file)
| from requests import get
from bs4 import BeautifulSoup, SoupStrainer
import httplib2
import re
from win32printing import Printer
def getLinks(url):
links = []
document = BeautifulSoup(response, "html.parser")
for element in document.findAll('a', href=re.compile(".pdf$")):
links.append(element.get('href'))
return links
site = 'https://greenteapress.com/wp/think-python/'
http = httplib2.Http()
status, response = http.request(site)
pdf_links = getLinks(response)
files = []
for link in pdf_links:
pdf_file = get(link)
files.append(pdf_file)
with Printer(linegap=1) as printer:
for pdf_file in files:
printer.text(pdf_file) | [
1,
2,
3,
4,
5
] |
1,255 | 12f0eeeb81fe611d88e33fd2e8df407e289fb582 | # Error using ncdump - NetCDF4 Python
ncdump -h filename
| null | null | null | null | [
0
] |
1,256 | bbb3d27ce8f4c1943ecc7ab542346c9f41cbd30e | <mask token>
class popen:
<mask token>
def __init__(self, command):
self._command = command
self._process = None
<mask token>
<mask token>
| <mask token>
class popen:
<mask token>
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
| <mask token>
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
| <mask token>
import os
import signal
import sys
import subprocess
from subprocess import Popen, PIPE
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
| # Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module implements helpers for GN SDK e2e tests.
"""
# Note, this is run on bots, which only support python2.7.
# Be sure to only use python2.7 features in this module.
import os
import signal
import sys
import subprocess
from subprocess import Popen, PIPE
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
| [
2,
4,
5,
6,
7
] |
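A short usage sketch for the popen wrapper defined in the row above; the echoed command is arbitrary and the example assumes a POSIX host, since os.setsid and os.killpg are unavailable on Windows.

# Assumes the popen class from the row above is in scope.
with popen(['echo', 'hello']) as p:
    out, err = p.communicate()
    print(out.decode().strip())  # -> hello
    print(p.returncode)          # -> 0
# On exit, any process group still running is killed with SIGKILL.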
1,257 | ba483c7eaf2f2ced7f70a14b53c781f190585024 | <mask token>
| <mask token>
def main():
grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,
0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,
0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0,
0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
start = 0, 0
end = 8, 9
path = astar(grid, start, end)
print(path)
<mask token>
| <mask token>
def main():
grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,
0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,
0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0,
0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
start = 0, 0
end = 8, 9
path = astar(grid, start, end)
print(path)
if __name__ == '__main__':
main()
| from AStar import astar
def main():
grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,
0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,
0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0,
0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
start = 0, 0
end = 8, 9
path = astar(grid, start, end)
print(path)
if __name__ == '__main__':
main()
| from AStar import astar
def main():
grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
start = (0, 0)
end = (8, 9)
path = astar(grid, start, end)
print(path)
if __name__ == '__main__':
main()
| [
0,
1,
2,
3,
4
] |
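The driver above imports astar from an AStar module that is not included in the row. The sketch below is one assumed implementation with the same call shape -- astar(grid, start, end) returning a list of (row, col) tuples -- using 4-connected moves, a Manhattan heuristic, and treating 1 cells as walls.

import heapq

def astar(grid, start, end):
    # A* on a 4-connected grid; 0 = free cell, 1 = wall.
    rows, cols = len(grid), len(grid[0])
    def h(p):
        return abs(p[0] - end[0]) + abs(p[1] - end[1])
    open_heap = [(h(start), 0, start)]
    came_from = {}
    g_score = {start: 0}
    while open_heap:
        _, g, current = heapq.heappop(open_heap)
        if current == end:
            path = [current]
            while current in came_from:
                current = came_from[current]
                path.append(current)
            return path[::-1]
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (current[0] + dr, current[1] + dc)
            if 0 <= nxt[0] < rows and 0 <= nxt[1] < cols and grid[nxt[0]][nxt[1]] == 0:
                tentative = g + 1
                if tentative < g_score.get(nxt, float('inf')):
                    g_score[nxt] = tentative
                    came_from[nxt] = current
                    heapq.heappush(open_heap, (tentative + h(nxt), tentative, nxt))
    return None  # no path found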
1,258 | b0fad3847519bb18365a8cd4226d06e9d96a8308 | <mask token>
| <mask token>
urlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=
'index')]
| from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=
'index')]
| from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path(r'', views.index, name='index'),
]
| null | [
0,
1,
2,
3
] |
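Both URL configurations above point at a views.index callable that the row does not show; a minimal hypothetical stub that makes the routing runnable could be:

# views.py -- hypothetical stub for the index view referenced in urls.py
from django.http import HttpResponse

def index(request):
    return HttpResponse('index page')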
1,259 | 68a776d7fccc8d8496a944baff51d2a862fc7d31 | <mask token>
| def IsContinuous(numbers):
if not numbers or len(numbers) < 1:
return False
numbers.sort()
number_of_zero = 0
number_of_gap = 0
for i in range(len(numbers)):
if numbers[i] == 0:
number_of_zero += 1
small = number_of_zero
big = small + 1
while big < len(numbers):
if numbers[small] == numbers[big]:
return False
number_of_gap += numbers[big] - numbers[small] - 1
small = big
big += 1
if number_of_gap <= number_of_zero:
return True
else:
return False
| # flush in poker
def IsContinuous(numbers):
if not numbers or len(numbers) < 1 :
return False
numbers.sort()
number_of_zero = 0
number_of_gap = 0
for i in range(len(numbers)):
if numbers[i] == 0:
number_of_zero += 1
small = number_of_zero
big = small + 1
while(big < len(numbers)):
if numbers[small] == numbers[big]:
return False
number_of_gap += (numbers[big] - numbers[small] - 1)
small = big
big += 1
if number_of_gap <= number_of_zero:
return True
else:
return False
| null | null | [
0,
1,
2
] |
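A few illustrative calls for the straight-flush check above, where 0 stands for a joker that may fill any single gap; the results follow from comparing the number of gaps against the number of jokers.

print(IsContinuous([1, 3, 2, 5, 4]))  # True  - already consecutive
print(IsContinuous([0, 3, 2, 6, 4]))  # True  - the joker fills the missing 5
print(IsContinuous([0, 3, 1, 6, 4]))  # False - gaps at 2 and 5, only one joker
print(IsContinuous([1, 1, 2, 3, 4]))  # False - a pair can never form a straight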
1,260 | d73491d6673abdabad85176c5f75a191995c806d | <mask token>
| <mask token>
app_name = 'blogs'
urlpatterns = [path('', views.index, name='index'), re_path(
'^blogs/(?P<blog_id>\\d+)/$', views.blog, name='blog'), path(
'new_blog/', views.new_blog, name='new_blog'), re_path(
'^edit_blog/(?P<blog_id>\\d+)/$', views.edit_blog, name='edit_blog')]
| from . import views
from django.urls import path, re_path
app_name = 'blogs'
urlpatterns = [path('', views.index, name='index'), re_path(
'^blogs/(?P<blog_id>\\d+)/$', views.blog, name='blog'), path(
'new_blog/', views.new_blog, name='new_blog'), re_path(
'^edit_blog/(?P<blog_id>\\d+)/$', views.edit_blog, name='edit_blog')]
| from . import views
from django.urls import path, re_path
app_name = "blogs"
urlpatterns = [
path('', views.index, name='index'),
re_path(r'^blogs/(?P<blog_id>\d+)/$', views.blog, name='blog'),
path('new_blog/', views.new_blog, name='new_blog'),
re_path(r'^edit_blog/(?P<blog_id>\d+)/$', views.edit_blog, name='edit_blog'),
]
| null | [
0,
1,
2,
3
] |
1,261 | 618b6c74133e181ce5cbaf4e969d9fc3aa44ce98 | <mask token>
class TestMultiTan(object):
<mask token>
if sys.platform == 'darwin':
WTML = WTML.replace('Dec="0.7438249862258411"',
'Dec="0.743824986225841"')
<mask token>
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
<mask token>
<mask token>
<mask token>
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
<mask token>
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(self.WTML.replace(
'Thumbnail="thumb.jpg"', '').replace(
'<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',
'<ThumbnailUrl></ThumbnailUrl>'))
args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.
work_path('basic_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('basic_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
<mask token>
@pytest.mark.skipif('not HAS_REPROJECT')
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path('as_multi_wcs')
pio = pyramid.PyramidIO(outdir, default_format='fits')
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')
], hdu_index=0)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
| <mask token>
class TestMultiTan(object):
<mask token>
if sys.platform == 'darwin':
WTML = WTML.replace('Dec="0.7438249862258411"',
'Dec="0.743824986225841"')
<mask token>
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
<mask token>
def test_basic(self):
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])
proc = multi_tan.MultiTanProcessor(coll)
from ..pyramid import PyramidIO
pio = PyramidIO(self.work_path('basic'), default_format='fits')
builder = Builder(pio)
proc.compute_global_pixelization(builder)
proc.tile(pio)
<mask token>
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
<mask token>
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(self.WTML.replace(
'Thumbnail="thumb.jpg"', '').replace(
'<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',
'<ThumbnailUrl></ThumbnailUrl>'))
args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.
work_path('basic_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('basic_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
<mask token>
@pytest.mark.skipif('not HAS_REPROJECT')
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path('as_multi_wcs')
pio = pyramid.PyramidIO(outdir, default_format='fits')
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')
], hdu_index=0)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
| <mask token>
class TestMultiTan(object):
<mask token>
if sys.platform == 'darwin':
WTML = WTML.replace('Dec="0.7438249862258411"',
'Dec="0.743824986225841"')
def setup_method(self, method):
from tempfile import mkdtemp
self.work_dir = mkdtemp()
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
def work_path(self, *pieces):
return os.path.join(self.work_dir, *pieces)
def test_basic(self):
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])
proc = multi_tan.MultiTanProcessor(coll)
from ..pyramid import PyramidIO
pio = PyramidIO(self.work_path('basic'), default_format='fits')
builder = Builder(pio)
proc.compute_global_pixelization(builder)
proc.tile(pio)
<mask token>
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
<mask token>
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(self.WTML.replace(
'Thumbnail="thumb.jpg"', '').replace(
'<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',
'<ThumbnailUrl></ThumbnailUrl>'))
args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.
work_path('basic_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('basic_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
def test_study_cli(self):
"""
Test tile-study on FITS. This should properly go in test_study.py, but
this file is the one that has the reference WTML information.
"""
expected = etree.fromstring(self.WTML)
args = ['tile-study', '--placeholder-thumbnail', '--outdir', self.
work_path('study_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('study_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('study_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('study_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
@pytest.mark.skipif('not HAS_REPROJECT')
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path('as_multi_wcs')
pio = pyramid.PyramidIO(outdir, default_format='fits')
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')
], hdu_index=0)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
| <mask token>
class TestMultiTan(object):
WTML = """
<Folder Browseable="True" Group="Explorer" Name="Toasty" Searchable="True">
<Place
Angle="0"
AngularSize="0"
Constellation="VIR"
DataSetType="Sky"
Dec="0.7438249862258411"
Magnitude="0"
Name="Toasty"
Opacity="100"
RA="14.41975153073335"
Rotation="0"
Thumbnail="thumb.jpg"
ZoomLevel="0.2437119999998555"
>
<ForegroundImageSet>
<ImageSet
BandPass="Visible"
BaseDegreesPerTile="0.023893333333319167"
BaseTileLevel="0"
BottomsUp="False"
CenterX="216.2962962963"
CenterY="0.74380165289257"
DataSetType="Sky"
ElevationModel="False"
FileType=".fits"
Generic="False"
Name="Toasty"
OffsetX="2.33333333333195e-05"
OffsetY="2.33333333333195e-05"
Projection="Tan"
QuadTreeMap=""
Rotation="-0"
Sparse="True"
StockSet="False"
TileLevels="1"
Url="{1}/{3}/{3}_{2}.fits"
WidthFactor="2"
>
<ThumbnailUrl>thumb.jpg</ThumbnailUrl>
</ImageSet>
</ForegroundImageSet>
</Place>
</Folder>"""
if sys.platform == 'darwin':
WTML = WTML.replace('Dec="0.7438249862258411"',
'Dec="0.743824986225841"')
def setup_method(self, method):
from tempfile import mkdtemp
self.work_dir = mkdtemp()
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
def work_path(self, *pieces):
return os.path.join(self.work_dir, *pieces)
def test_basic(self):
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')])
proc = multi_tan.MultiTanProcessor(coll)
from ..pyramid import PyramidIO
pio = PyramidIO(self.work_path('basic'), default_format='fits')
builder = Builder(pio)
proc.compute_global_pixelization(builder)
proc.tile(pio)
BARY_SLICES = [(slice(0, 128), slice(0, 128)), (slice(0, 128), slice(
128, None)), (slice(128, None), slice(0, 128)), (slice(128, None),
slice(128, None))]
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
WCS512_BARYDATA = [(63.44949378800272, 64.40535387506924), (
63.24744175084746, 63.67473452789256), (65.22950207855361,
63.35629429568745), (62.027396724898814, 62.815937534782144)]
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(self.WTML.replace(
'Thumbnail="thumb.jpg"', '').replace(
'<ThumbnailUrl>thumb.jpg</ThumbnailUrl>',
'<ThumbnailUrl></ThumbnailUrl>'))
args = ['tile-multi-tan', '--hdu-index', '0', '--outdir', self.
work_path('basic_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('basic_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('basic_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('basic_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
def test_study_cli(self):
"""
Test tile-study on FITS. This should properly go in test_study.py, but
this file is the one that has the reference WTML information.
"""
expected = etree.fromstring(self.WTML)
args = ['tile-study', '--placeholder-thumbnail', '--outdir', self.
work_path('study_cli'), test_path('wcs512.fits.gz')]
cli.entrypoint(args)
with open(self.work_path('study_cli', 'index_rel.wtml'), 'rt',
encoding='utf8') as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = ['cascade', '--start', '1', self.work_path('study_cli')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('study_cli', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
@pytest.mark.skipif('not HAS_REPROJECT')
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path('as_multi_wcs')
pio = pyramid.PyramidIO(outdir, default_format='fits')
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection([test_path('wcs512.fits.gz')
], hdu_index=0)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = ['cascade', '--start', '1', self.work_path('as_multi_wcs')]
cli.entrypoint(args)
self.maybe_test_barycenter(self.work_path('as_multi_wcs', '0', '0',
'0_0.fits'), self.WCS512_BARYDATA)
| # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2021 the AAS WorldWide Telescope project
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.testing as nt
import os.path
import pytest
import sys
from xml.etree import ElementTree as etree
from . import assert_xml_elements_equal, test_path
from ..builder import Builder
from .. import cli
from .. import collection
from .. import multi_tan
try:
from astropy.io import fits
HAS_ASTRO = True
except ImportError:
HAS_ASTRO = False
try:
import reproject
HAS_REPROJECT = True
except ImportError:
HAS_REPROJECT = False
class TestMultiTan(object):
WTML = """
<Folder Browseable="True" Group="Explorer" Name="Toasty" Searchable="True">
<Place
Angle="0"
AngularSize="0"
Constellation="VIR"
DataSetType="Sky"
Dec="0.7438249862258411"
Magnitude="0"
Name="Toasty"
Opacity="100"
RA="14.41975153073335"
Rotation="0"
Thumbnail="thumb.jpg"
ZoomLevel="0.2437119999998555"
>
<ForegroundImageSet>
<ImageSet
BandPass="Visible"
BaseDegreesPerTile="0.023893333333319167"
BaseTileLevel="0"
BottomsUp="False"
CenterX="216.2962962963"
CenterY="0.74380165289257"
DataSetType="Sky"
ElevationModel="False"
FileType=".fits"
Generic="False"
Name="Toasty"
OffsetX="2.33333333333195e-05"
OffsetY="2.33333333333195e-05"
Projection="Tan"
QuadTreeMap=""
Rotation="-0"
Sparse="True"
StockSet="False"
TileLevels="1"
Url="{1}/{3}/{3}_{2}.fits"
WidthFactor="2"
>
<ThumbnailUrl>thumb.jpg</ThumbnailUrl>
</ImageSet>
</ForegroundImageSet>
</Place>
</Folder>"""
# Gross workaround for platform differences in the XML output.
if sys.platform == "darwin":
WTML = WTML.replace('Dec="0.7438249862258411"', 'Dec="0.743824986225841"')
# Back to the non-gross stuff.
def setup_method(self, method):
from tempfile import mkdtemp
self.work_dir = mkdtemp()
def teardown_method(self, method):
from shutil import rmtree
rmtree(self.work_dir)
def work_path(self, *pieces):
return os.path.join(self.work_dir, *pieces)
def test_basic(self):
coll = collection.SimpleFitsCollection([test_path("wcs512.fits.gz")])
proc = multi_tan.MultiTanProcessor(coll)
from ..pyramid import PyramidIO
pio = PyramidIO(self.work_path("basic"), default_format="fits")
builder = Builder(pio)
proc.compute_global_pixelization(builder)
proc.tile(pio)
BARY_SLICES = [
(slice(0, 128), slice(0, 128)),
(slice(0, 128), slice(128, None)),
(slice(128, None), slice(0, 128)),
(slice(128, None), slice(128, None)),
]
def maybe_test_barycenter(self, path, bary_expected):
"""
Check the barycenters of four 128x128 quadrants of a tile file. The idea
here is that if we introduce a problem with vertical flips in tiled FITS
processing, we'll detect it here.
"""
if not HAS_ASTRO:
return
with fits.open(path) as hdul:
data = hdul[0].data
data[~np.isfinite(data)] = 0.0
bary_observed = []
for islice in self.BARY_SLICES:
idata = data[islice]
yidx, xidx = np.indices((128, 128))
xbary = (idata * xidx).sum() / idata.sum()
ybary = (idata * yidx).sum() / idata.sum()
bary_observed.append((xbary, ybary))
nt.assert_array_almost_equal(bary_observed, bary_expected, decimal=5)
WCS512_BARYDATA = [
(63.44949378800272, 64.40535387506924),
(63.24744175084746, 63.67473452789256),
(65.22950207855361, 63.35629429568745),
(62.027396724898814, 62.815937534782144),
]
def test_basic_cli(self):
"""
Test the CLI interface. We don't go out of our way to validate the
computations in detail -- that's for the unit tests that probe the
module directly.
"""
expected = etree.fromstring(
self.WTML.replace('Thumbnail="thumb.jpg"', "").replace(
"<ThumbnailUrl>thumb.jpg</ThumbnailUrl>",
"<ThumbnailUrl></ThumbnailUrl>",
)
)
args = [
"tile-multi-tan",
"--hdu-index",
"0",
"--outdir",
self.work_path("basic_cli"),
test_path("wcs512.fits.gz"),
]
cli.entrypoint(args)
with open(
self.work_path("basic_cli", "index_rel.wtml"), "rt", encoding="utf8"
) as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = [
"cascade",
"--start",
"1",
self.work_path("basic_cli"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("basic_cli", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
def test_study_cli(self):
"""
Test tile-study on FITS. This should properly go in test_study.py, but
this file is the one that has the reference WTML information.
"""
expected = etree.fromstring(self.WTML)
args = [
"tile-study",
"--placeholder-thumbnail",
"--outdir",
self.work_path("study_cli"),
test_path("wcs512.fits.gz"),
]
cli.entrypoint(args)
with open(
self.work_path("study_cli", "index_rel.wtml"), "rt", encoding="utf8"
) as f:
observed = etree.fromstring(f.read())
assert_xml_elements_equal(observed, expected)
args = [
"cascade",
"--start",
"1",
self.work_path("study_cli"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("study_cli", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
@pytest.mark.skipif("not HAS_REPROJECT")
def test_as_multi_wcs(self):
"""
Once again, this doesn't super belong here, but this is where we have
the reference data. We don't compare the WTML contents here since the
reprojection isn't going to preserve the WCS in detail.
"""
from .. import builder, collection, multi_wcs, pyramid
reproject_function = reproject.reproject_interp
outdir = self.work_path("as_multi_wcs")
pio = pyramid.PyramidIO(outdir, default_format="fits")
bld = builder.Builder(pio)
coll = collection.SimpleFitsCollection(
[test_path("wcs512.fits.gz")], hdu_index=0
)
proc = multi_wcs.MultiWcsProcessor(coll)
proc.compute_global_pixelization(bld)
proc.tile(pio, reproject_function, cli_progress=False, parallel=1)
bld.write_index_rel_wtml()
args = [
"cascade",
"--start",
"1",
self.work_path("as_multi_wcs"),
]
cli.entrypoint(args)
self.maybe_test_barycenter(
self.work_path("as_multi_wcs", "0", "0", "0_0.fits"), self.WCS512_BARYDATA
)
| [
5,
6,
9,
10,
13
] |
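The maybe_test_barycenter check above reduces to an intensity-weighted centroid per 128x128 quadrant. A standalone sketch of that computation on synthetic data, independent of the FITS and tiling machinery, is shown below.

import numpy as np

def barycenter(tile):
    # Intensity-weighted centroid of a 2-D array, mirroring the test's math
    tile = np.where(np.isfinite(tile), tile, 0.0)
    yidx, xidx = np.indices(tile.shape)
    total = tile.sum()
    return (tile * xidx).sum() / total, (tile * yidx).sum() / total

demo = np.zeros((128, 128))
demo[40, 60] = 1.0        # a single bright pixel at row 40, column 60
print(barycenter(demo))   # -> (60.0, 40.0)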
1,262 | f5f9a1c7dcb7345e24f50db54649a1970fc37185 | <mask token>
| <mask token>
m.drawcoastlines()
m.fillcontinents(color='coral', lake_color='aqua')
m.drawparallels(np.arange(-90.0, 91.0, 30.0))
m.drawmeridians(np.arange(-180.0, 181.0, 60.0))
m.drawmapboundary(fill_color='aqua')
plt.title('Cylindrical Equal-Area Projection')
plt.show()
| <mask token>
m = Basemap(projection='cea', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,
urcrnrlon=180, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral', lake_color='aqua')
m.drawparallels(np.arange(-90.0, 91.0, 30.0))
m.drawmeridians(np.arange(-180.0, 181.0, 60.0))
m.drawmapboundary(fill_color='aqua')
plt.title('Cylindrical Equal-Area Projection')
plt.show()
| from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
m = Basemap(projection='cea', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,
urcrnrlon=180, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral', lake_color='aqua')
m.drawparallels(np.arange(-90.0, 91.0, 30.0))
m.drawmeridians(np.arange(-180.0, 181.0, 60.0))
m.drawmapboundary(fill_color='aqua')
plt.title('Cylindrical Equal-Area Projection')
plt.show()
| from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon
# are the lat/lon values of the lower left and upper right corners
# of the map.
# resolution = 'c' means use crude resolution coastlines.
m = Basemap(projection='cea',llcrnrlat=-90,urcrnrlat=90,\
llcrnrlon=-180,urcrnrlon=180,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,91.,30.))
m.drawmeridians(np.arange(-180.,181.,60.))
m.drawmapboundary(fill_color='aqua')
plt.title("Cylindrical Equal-Area Projection")
plt.show()
| [
0,
1,
2,
3,
4
] |
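Basemap is no longer maintained upstream; an approximately equivalent cylindrical equal-area plot with cartopy is sketched below as an assumed alternative -- the projection class, feature colors, and gridline spacing are chosen to mimic the Basemap version, not taken from it.

import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature

# Lambert's cylindrical projection is cartopy's equal-area cylindrical option
ax = plt.axes(projection=ccrs.LambertCylindrical())
ax.set_global()
ax.add_feature(cfeature.LAND, facecolor='coral')
ax.add_feature(cfeature.OCEAN, facecolor='aqua')
ax.coastlines()
ax.gridlines(xlocs=range(-180, 181, 60), ylocs=range(-90, 91, 30))
plt.title('Cylindrical Equal-Area Projection')
plt.show()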
1,263 | 76dd4d2b5f68683c77f9502a2298e65c97db7c8d | <mask token>
| class ConfigError(ValueError):
pass
| null | null | null | [
0,
1
] |
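A small usage sketch for the ConfigError subclass above; the settings dictionary and key name are made up.

def require(settings, key):
    # Raise the project's ConfigError instead of a bare KeyError
    if key not in settings:
        raise ConfigError('missing required setting: %s' % key)
    return settings[key]

require({'debug': True}, 'database_url')  # raises ConfigError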
1,264 | f16d43d9dfb3e9b9589fa92eb82aaa4c73fe48cd | <mask token>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
<mask token>
| <mask token>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
<mask token>
def queue_tablet(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': False})
| <mask token>
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
@login_required
def queue(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': True})
def queue_tablet(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': False})
| from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from orders.models import Setting
def search(request):
return render(request, 'ui/search.html')
def search_printed(request):
print_url = ''
setting = Setting.objects.filter(name='printer').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/search.html', {'print_url': print_url})
@login_required
def queue(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': True})
def queue_tablet(request):
print_url = ''
setting = Setting.objects.filter(name='printer_admin').first()
if setting != None:
print_url = setting.value
return render(request, 'ui/queue.html', {'print_url': print_url,
'footer': False})
| null | [
2,
3,
4,
5
] |
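A hypothetical urls.py wiring for the four views above; the route paths and names are assumptions, only the view callables come from the row.

# urls.py -- assumed routing for the search/queue views above
from django.urls import path
from . import views

urlpatterns = [
    path('', views.search, name='search'),
    path('printed/', views.search_printed, name='search_printed'),
    path('queue/', views.queue, name='queue'),
    path('queue/tablet/', views.queue_tablet, name='queue_tablet'),
]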
1,265 | dc81ab808720c3a2c76174264c9be9bcdd99c292 | <mask token>
def Msort(left, right):
if left + 1 < right:
mid = int((left + right) / 2)
Msort(left, mid)
Msort(mid, right)
merge(left, mid, right)
def main():
global ans
global A
n = int(input())
A = list(map(int, input().split()))
Msort(0, n)
print(' '.join(list(map(str, A))))
print(ans)
<mask token>
| <mask token>
def merge(left, mid, right):
global A
global ans
n1 = mid - left
n2 = right - mid
l = []
r = []
for i in range(n1):
l += [A[left + i]]
for i in range(n2):
r += [A[mid + i]]
l += [10 ** 18]
r += [10 ** 18]
i = 0
j = 0
ans += right - left
for k in range(left, right):
if l[i] <= r[j]:
A[k] = l[i]
i += 1
else:
A[k] = r[j]
j += 1
def Msort(left, right):
if left + 1 < right:
mid = int((left + right) / 2)
Msort(left, mid)
Msort(mid, right)
merge(left, mid, right)
def main():
global ans
global A
n = int(input())
A = list(map(int, input().split()))
Msort(0, n)
print(' '.join(list(map(str, A))))
print(ans)
<mask token>
| <mask token>
def merge(left, mid, right):
global A
global ans
n1 = mid - left
n2 = right - mid
l = []
r = []
for i in range(n1):
l += [A[left + i]]
for i in range(n2):
r += [A[mid + i]]
l += [10 ** 18]
r += [10 ** 18]
i = 0
j = 0
ans += right - left
for k in range(left, right):
if l[i] <= r[j]:
A[k] = l[i]
i += 1
else:
A[k] = r[j]
j += 1
def Msort(left, right):
if left + 1 < right:
mid = int((left + right) / 2)
Msort(left, mid)
Msort(mid, right)
merge(left, mid, right)
def main():
global ans
global A
n = int(input())
A = list(map(int, input().split()))
Msort(0, n)
print(' '.join(list(map(str, A))))
print(ans)
main()
| A = []
ans = 0
def merge(left, mid, right):
global A
global ans
n1 = mid - left
n2 = right - mid
l = []
r = []
for i in range(n1):
l += [A[left + i]]
for i in range(n2):
r += [A[mid + i]]
l += [10 ** 18]
r += [10 ** 18]
i = 0
j = 0
ans += right - left
for k in range(left, right):
if l[i] <= r[j]:
A[k] = l[i]
i += 1
else:
A[k] = r[j]
j += 1
def Msort(left, right):
if left + 1 < right:
mid = int((left + right) / 2)
Msort(left, mid)
Msort(mid, right)
merge(left, mid, right)
def main():
global ans
global A
n = int(input())
A = list(map(int, input().split()))
Msort(0, n)
print(' '.join(list(map(str, A))))
print(ans)
main()
| A = []
ans = 0
def merge(left, mid, right):
global A
global ans
n1 = mid - left
n2 = right - mid
l = []
r = []
for i in range(n1):
l += [A[left + i]]
for i in range(n2):
r += [A[mid + i]]
l += [10**18]
r += [10**18]
i = 0
j = 0
ans += right - left
for k in range(left, right):
if l[i] <= r[j]:
A[k] = l[i]
i += 1
else:
A[k] = r[j]
j += 1
def Msort(left, right):
if left + 1 < right:
mid = int((left + right)/2)
Msort(left, mid)
Msort(mid,right)
merge(left,mid,right)
def main():
global ans
global A
n = int(input())
A = list(map(int,input().split()))
Msort(0,n)
print(" ".join(list(map(str,A))))
print(ans)
main()
| [
2,
3,
4,
5,
6
] |
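In the counting merge sort above, ans accumulates right - left for every merge, i.e. the number of elements written back during merging (the ALDS1-style comparison count). The same quantity can be produced without module-level globals; a compact sketch returning (sorted list, count):

def merge_sort_count(a):
    # Returns (sorted copy, number of elements written during merges)
    if len(a) <= 1:
        return a, 0
    mid = len(a) // 2
    left, cl = merge_sort_count(a[:mid])
    right, cr = merge_sort_count(a[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged += left[i:] + right[j:]
    return merged, cl + cr + len(a)

print(merge_sort_count([8, 5, 9, 2, 6, 3, 7, 1, 10, 4]))
# -> ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 34)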
1,266 | a6192e39d86005882d0bde040a99f364bf701c3b | <mask token>
| def merge_sort(mlist):
if len(mlist) <= 1:
return mlist
mid = int(len(mlist) / 2)
left = merge_sort(mlist[:mid])
right = merge_sort(mlist[mid:])
return merge(left, right)
<mask token>
| def merge_sort(mlist):
if len(mlist) <= 1:
return mlist
mid = int(len(mlist) / 2)
left = merge_sort(mlist[:mid])
right = merge_sort(mlist[mid:])
return merge(left, right)
def merge(left, right):
"""
    Merge step: combine the two sorted lists left[] and right[] into one large sorted list.
:param left:
:param right:
:return:
"""
l, r = 0, 0
result = []
while l < len(left) and r < len(right):
if left[l] < right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
result += left[l:]
result += right[r:]
return result
<mask token>
| def merge_sort(mlist):
if len(mlist) <= 1:
return mlist
mid = int(len(mlist) / 2)
left = merge_sort(mlist[:mid])
right = merge_sort(mlist[mid:])
return merge(left, right)
def merge(left, right):
"""
    Merge step: combine the two sorted lists left[] and right[] into one large sorted list.
:param left:
:param right:
:return:
"""
l, r = 0, 0
result = []
while l < len(left) and r < len(right):
if left[l] < right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
result += left[l:]
result += right[r:]
return result
if __name__ == '__main__':
mlist = merge_sort([4, 5, 6, 7, 3, 2, 6, 9, 8])
print(mlist)
| # -*- coding: utf-8 -*-
def merge_sort(mlist):
if len(mlist) <= 1:
return mlist
mid = int(len(mlist) / 2)
    # recursively split the list into two halves
left = merge_sort(mlist[:mid])
right = merge_sort(mlist[mid:])
    return merge(left, right)  # sort each half and merge them back into one sorted list
def merge(left, right):
"""
    Merge step: combine the two sorted lists left[] and right[] into one large sorted list.
:param left:
:param right:
:return:
"""
    l, r = 0, 0  # index pointers into the left and right lists
result = []
while l < len(left) and r < len(right):
        # compare the heads and append the smaller element
if left[l] < right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
result += left[l:]
result += right[r:]
return result
if __name__ == '__main__':
mlist = merge_sort([4, 5, 6, 7, 3, 2, 6, 9, 8])
print(mlist)
| [
0,
1,
2,
3,
4
] |
1,267 | 7cbf2082d530c315fdcfdb94f5c6ac4755ea2081 | <mask token>
class HBNBCommand(cmd.Cmd):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def emptyline(self):
"""Do nothing"""
pass
def do_create(self, line):
"""Creates a new instance of BaseModel"""
arg_line = line.split()
if line == '':
print('** class name missing **')
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if type(line) == str:
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print('** no instance found **')
else:
srch_id = line[0] + '.' + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print('** no instance found **')
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print('** no instance found **')
def do_all(self, line):
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) == 1:
pass
elif len_args == 2:
print('** attribute name missing **')
elif len_args == 3:
print('** value missing **')
else:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', '')
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print('** no instance found **')
<mask token>
def check_if_created(self, arg_line, len_args):
"""Verifies if class exists"""
if len_args == 0:
print('** class name missing **')
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif len_args == 1:
print('** instance id missing **')
return 1
def do_count(self, line):
"""Counts the number of existing instances"""
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
<mask token>
| <mask token>
class HBNBCommand(cmd.Cmd):
<mask token>
<mask token>
<mask token>
def do_quit(self, line):
"""Exit the CMD program"""
return True
<mask token>
def emptyline(self):
"""Do nothing"""
pass
def do_create(self, line):
"""Creates a new instance of BaseModel"""
arg_line = line.split()
if line == '':
print('** class name missing **')
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if type(line) == str:
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print('** no instance found **')
else:
srch_id = line[0] + '.' + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print('** no instance found **')
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print('** no instance found **')
def do_all(self, line):
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) == 1:
pass
elif len_args == 2:
print('** attribute name missing **')
elif len_args == 3:
print('** value missing **')
else:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', '')
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print('** no instance found **')
def default(self, line):
"""all method names that aren't defined"""
args_line = line.split('.')
if len(args_line) > 1:
if args_line[1] == 'all()':
self.do_all(args_line[0])
if args_line[1] == 'count()':
self.do_count(args_line[0])
my_count = args_line[1].split('"')
res = re.findall('\\(.*?\\)', args_line[1])
my_count[0] = my_count[0] + line[-1]
if my_count[0] == 'show()':
myNewList = [args_line[0], my_count[1]]
self.do_show(myNewList)
else:
cmd.Cmd.default(self, line)
def check_if_created(self, arg_line, len_args):
"""Verifies if class exists"""
if len_args == 0:
print('** class name missing **')
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif len_args == 1:
print('** instance id missing **')
return 1
def do_count(self, line):
"""Counts the number of existing instances"""
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
<mask token>
| <mask token>
class HBNBCommand(cmd.Cmd):
<mask token>
<mask token>
<mask token>
def do_quit(self, line):
"""Exit the CMD program"""
return True
def do_EOF(self, line):
"""Exit the CMD program"""
return True
def emptyline(self):
"""Do nothing"""
pass
def do_create(self, line):
"""Creates a new instance of BaseModel"""
arg_line = line.split()
if line == '':
print('** class name missing **')
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if type(line) == str:
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print('** no instance found **')
else:
srch_id = line[0] + '.' + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print('** no instance found **')
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print('** no instance found **')
def do_all(self, line):
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) == 1:
pass
elif len_args == 2:
print('** attribute name missing **')
elif len_args == 3:
print('** value missing **')
else:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', '')
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print('** no instance found **')
def default(self, line):
"""all method names that aren't defined"""
args_line = line.split('.')
if len(args_line) > 1:
if args_line[1] == 'all()':
self.do_all(args_line[0])
if args_line[1] == 'count()':
self.do_count(args_line[0])
my_count = args_line[1].split('"')
res = re.findall('\\(.*?\\)', args_line[1])
my_count[0] = my_count[0] + line[-1]
if my_count[0] == 'show()':
myNewList = [args_line[0], my_count[1]]
self.do_show(myNewList)
else:
cmd.Cmd.default(self, line)
def check_if_created(self, arg_line, len_args):
"""Verifies if class exists"""
if len_args == 0:
print('** class name missing **')
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif len_args == 1:
print('** instance id missing **')
return 1
def do_count(self, line):
"""Counts the number of existing instances"""
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
<mask token>
| <mask token>
import cmd
import models
import re
from models.base_model import BaseModel
from models import storage
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class HBNBCommand(cmd.Cmd):
""" This class to setup the command interpreter """
__DCT_CLS = {'BaseModel': BaseModel, 'User': User, 'State': State,
'City': City, 'Amenity': Amenity, 'Place': Place, 'Review': Review}
prompt = '(hbnb) '
def do_quit(self, line):
"""Exit the CMD program"""
return True
def do_EOF(self, line):
"""Exit the CMD program"""
return True
def emptyline(self):
"""Do nothing"""
pass
def do_create(self, line):
"""Creates a new instance of BaseModel"""
arg_line = line.split()
if line == '':
print('** class name missing **')
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if type(line) == str:
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print('** no instance found **')
else:
srch_id = line[0] + '.' + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print('** no instance found **')
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) != 1:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print('** no instance found **')
def do_all(self, line):
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if self.check_if_created(arg_line, len_args) == 1:
pass
elif len_args == 2:
print('** attribute name missing **')
elif len_args == 3:
print('** value missing **')
else:
get_inst = arg_line[0] + '.' + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', '')
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print('** no instance found **')
def default(self, line):
"""all method names that aren't defined"""
args_line = line.split('.')
if len(args_line) > 1:
if args_line[1] == 'all()':
self.do_all(args_line[0])
if args_line[1] == 'count()':
self.do_count(args_line[0])
my_count = args_line[1].split('"')
res = re.findall('\\(.*?\\)', args_line[1])
my_count[0] = my_count[0] + line[-1]
if my_count[0] == 'show()':
myNewList = [args_line[0], my_count[1]]
self.do_show(myNewList)
else:
cmd.Cmd.default(self, line)
def check_if_created(self, arg_line, len_args):
"""Verifies if class exists"""
if len_args == 0:
print('** class name missing **')
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif len_args == 1:
print('** instance id missing **')
return 1
def do_count(self, line):
"""Counts the number of existing instances"""
arg_line = line.split()
if line == '' or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
if __name__ == '__main__':
HBNBCommand().cmdloop()
| #!/usr/bin/python3
"""
program of the command interpreter
"""
import cmd
import models
import re
from models.base_model import BaseModel
from models import storage
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class HBNBCommand(cmd.Cmd):
""" This class to setup the command interpreter """
__DCT_CLS = {
"BaseModel": BaseModel,
"User": User,
"State": State,
"City": City,
"Amenity": Amenity,
"Place": Place,
"Review": Review
}
prompt = "(hbnb) "
def do_quit(self, line):
'''Exit the CMD program'''
return True
def do_EOF(self, line):
'''Exit the CMD program'''
return True
def emptyline(self):
'''Do nothing'''
pass
def do_create(self, line):
'''Creates a new instance of BaseModel'''
arg_line = line.split()
if line == "":
print("** class name missing **")
return False
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
else:
new_instance = self.__DCT_CLS[arg_line[0]]()
print(new_instance.id)
new_instance.save()
def do_show(self, line):
if (type(line) == str):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) != 1):
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
print(dict_classes[get_inst])
else:
print("** no instance found **")
else:
srch_id = line[0] + "." + line[1]
dict_classes = models.storage.all()
if srch_id in dict_classes.keys():
print(dict_classes[srch_id])
else:
print("** no instance found **")
def do_destroy(self, line):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) != 1):
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
del dict_classes[get_inst]
models.storage.save()
else:
print("** no instance found **")
def do_all(self, line):
arg_line = line.split()
if line == "" or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
print(list_classes)
else:
print("** class doesn't exist **")
def do_update(self, line):
arg_line = line.split()
len_args = len(arg_line)
if (self.check_if_created(arg_line, len_args) == 1):
pass
elif (len_args == 2):
print("** attribute name missing **")
elif (len_args == 3):
print("** value missing **")
else:
get_inst = arg_line[0] + "." + arg_line[1]
dict_classes = models.storage.all()
if get_inst in dict_classes.keys():
if arg_line[3]:
arg_line[3] = arg_line[3].replace('"', "")
try:
arg_line[3] = int(arg_line[3])
except ValueError:
try:
arg_line[3] = float(arg_line[3])
except ValueError:
arg_line[3] = arg_line[3]
dict_classes[get_inst].__dict__[arg_line[2]] = arg_line[3]
dict_classes[get_inst].save()
else:
print("** no instance found **")
def default(self, line):
'''all method names that aren't defined'''
args_line = line.split('.')
if len(args_line) > 1:
if args_line[1] == "all()":
self.do_all(args_line[0])
if args_line[1] == "count()":
self.do_count(args_line[0])
my_count = args_line[1].split('"')
res = re.findall(r'\(.*?\)', args_line[1])
my_count[0] = my_count[0] + line[-1]
if my_count[0] == "show()":
myNewList = [args_line[0], my_count[1]]
self.do_show(myNewList)
else:
cmd.Cmd.default(self, line)
def check_if_created(self, arg_line, len_args):
'''Verifies if class exists'''
if len_args == 0:
print("** class name missing **")
return 1
elif arg_line[0] not in self.__DCT_CLS:
print("** class doesn't exist **")
return 1
elif (len_args == 1):
print("** instance id missing **")
return 1
def do_count(self, line):
'''Counts the number of existing instances'''
arg_line = line.split()
if line == "" or arg_line[0] in self.__DCT_CLS:
dir_classes = models.storage.all()
list_classes = []
count = 0
for key, value in dir_classes.items():
if line in key:
list_classes.append(value.__str__())
count += 1
print(count)
else:
print("** class doesn't exist **")
if __name__ == "__main__":
HBNBCommand().cmdloop()
| [
9,
11,
12,
16,
17
] |
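A short hypothetical session with the console above, assuming the models package is importable and file storage starts out empty; the printed id is invented and will differ on every run.

$ ./console.py
(hbnb) create User
49faff9a-6318-451f-87b6-910505c55907
(hbnb) User.count()
1
(hbnb) quit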
1,268 | 4db8b4403dd9064b7d5f935d4b9d111508c965fb | <mask token>
def dashboard(request):
context = {'context_list': ContextDefinition.objects.filter(Q(owner=
request.user) & Q(inherited=False) & Q(abstract=False)).order_by(
'-public', 'name'), 'full_abstract_list': get_list_allowed_abstract
(request), 'my_abstract_list': ContextDefinition.objects.filter(Q(
owner=request.user) & Q(inherited=False) & Q(abstract=True)).
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(
owner=request.user).order_by('-public', 'name'), 'machine_list':
Machines.objects.filter(owner=request.user)}
context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS
push_to_context('redirect_msg_info', 'msg_info', context, request)
push_to_context('redirect_msg_error', 'msg_error', context, request)
push_to_context('redirect_msg_warning', 'msg_warning', context, request)
push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)
return uncache_response(render_to_response('pages/dashboard.html',
context, RequestContext(request)))
<mask token>
| <mask token>
def welcome(request):
return render_to_response('pages/welcome.html', {}, RequestContext(request)
)
def dashboard(request):
context = {'context_list': ContextDefinition.objects.filter(Q(owner=
request.user) & Q(inherited=False) & Q(abstract=False)).order_by(
'-public', 'name'), 'full_abstract_list': get_list_allowed_abstract
(request), 'my_abstract_list': ContextDefinition.objects.filter(Q(
owner=request.user) & Q(inherited=False) & Q(abstract=True)).
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(
owner=request.user).order_by('-public', 'name'), 'machine_list':
Machines.objects.filter(owner=request.user)}
context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS
push_to_context('redirect_msg_info', 'msg_info', context, request)
push_to_context('redirect_msg_error', 'msg_error', context, request)
push_to_context('redirect_msg_warning', 'msg_warning', context, request)
push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)
return uncache_response(render_to_response('pages/dashboard.html',
context, RequestContext(request)))
def test(request):
raw = (
'<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'
)
return render_to_response('core/raw.html', {'body': raw},
RequestContext(request))
<mask token>
| <mask token>
def welcome(request):
return render_to_response('pages/welcome.html', {}, RequestContext(request)
)
def dashboard(request):
context = {'context_list': ContextDefinition.objects.filter(Q(owner=
request.user) & Q(inherited=False) & Q(abstract=False)).order_by(
'-public', 'name'), 'full_abstract_list': get_list_allowed_abstract
(request), 'my_abstract_list': ContextDefinition.objects.filter(Q(
owner=request.user) & Q(inherited=False) & Q(abstract=True)).
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(
owner=request.user).order_by('-public', 'name'), 'machine_list':
Machines.objects.filter(owner=request.user)}
context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS
push_to_context('redirect_msg_info', 'msg_info', context, request)
push_to_context('redirect_msg_error', 'msg_error', context, request)
push_to_context('redirect_msg_warning', 'msg_warning', context, request)
push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)
return uncache_response(render_to_response('pages/dashboard.html',
context, RequestContext(request)))
def test(request):
raw = (
'<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'
)
return render_to_response('core/raw.html', {'body': raw},
RequestContext(request))
def push_to_context(sessionName, contextName, context, request):
if sessionName in request.session:
context[contextName] = request.session[sessionName]
del request.session[sessionName]
| from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import Q
from cvmo import settings
from cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry
from cvmo.context.plugins import ContextPlugins
from cvmo.context.utils.views import uncache_response
from cvmo.context.utils.views import get_list_allowed_abstract
def welcome(request):
return render_to_response('pages/welcome.html', {}, RequestContext(request)
)
def dashboard(request):
context = {'context_list': ContextDefinition.objects.filter(Q(owner=
request.user) & Q(inherited=False) & Q(abstract=False)).order_by(
'-public', 'name'), 'full_abstract_list': get_list_allowed_abstract
(request), 'my_abstract_list': ContextDefinition.objects.filter(Q(
owner=request.user) & Q(inherited=False) & Q(abstract=True)).
order_by('name'), 'cluster_list': ClusterDefinition.objects.filter(
owner=request.user).order_by('-public', 'name'), 'machine_list':
Machines.objects.filter(owner=request.user)}
context['webapi_configurations'] = settings.WEBAPI_CONFIGURATIONS
push_to_context('redirect_msg_info', 'msg_info', context, request)
push_to_context('redirect_msg_error', 'msg_error', context, request)
push_to_context('redirect_msg_warning', 'msg_warning', context, request)
push_to_context('redirect_msg_confirm', 'msg_confirm', context, request)
return uncache_response(render_to_response('pages/dashboard.html',
context, RequestContext(request)))
def test(request):
raw = (
'<h1>404 - Not found</h1><p>This is not the website you are looking for</p>'
)
return render_to_response('core/raw.html', {'body': raw},
RequestContext(request))
def push_to_context(sessionName, contextName, context, request):
if sessionName in request.session:
context[contextName] = request.session[sessionName]
del request.session[sessionName]
| from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db.models import Q
from cvmo import settings
from cvmo.context.models import ContextDefinition, Machines, ClusterDefinition, MarketplaceContextEntry
from cvmo.context.plugins import ContextPlugins
from cvmo.context.utils.views import uncache_response
from cvmo.context.utils.views import get_list_allowed_abstract
def welcome(request):
return render_to_response('pages/welcome.html', {}, RequestContext(request))
def dashboard(request):
context = {
'context_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=False)).order_by('-public', 'name'),
'full_abstract_list': get_list_allowed_abstract(request),
'my_abstract_list': ContextDefinition.objects.filter(Q(owner=request.user) & Q(inherited=False) & Q(abstract=True)).order_by('name'),
'cluster_list': ClusterDefinition.objects.filter(owner=request.user).order_by('-public', 'name'),
'machine_list': Machines.objects.filter(owner=request.user)
}
context["webapi_configurations"] = settings.WEBAPI_CONFIGURATIONS
push_to_context("redirect_msg_info", "msg_info", context, request)
push_to_context("redirect_msg_error", "msg_error", context, request)
push_to_context("redirect_msg_warning", "msg_warning", context, request)
push_to_context("redirect_msg_confirm", "msg_confirm", context, request)
return uncache_response(render_to_response('pages/dashboard.html', context, RequestContext(request)))
def test(request):
raw = "<h1>404 - Not found</h1><p>This is not the website you are looking for</p>"
return render_to_response('core/raw.html', {'body': raw}, RequestContext(request))
def push_to_context(sessionName, contextName, context, request):
if sessionName in request.session:
context[contextName] = request.session[sessionName]
del request.session[sessionName]
| [
1,
3,
4,
5,
6
] |
1,269 | 816c11717c4f26b9013f7a83e1dfb2c0578cbcf8 | <mask token>
class MongoStorage(object):
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, connection):
self._connection = connection
self._collection = connection.objects
self._roots = connection.roots
root_doc = self._roots.find_one()
if root_doc is None:
self._root_id = self._roots.save({'list': []})
else:
self._root_id = root_doc['_id']
def add_to_roots(self, container_id):
self._roots.update({'_id': self._root_id}, {'$push': {'list':
container_id}})
def store_new_item(self, doc):
"""Save the new document."""
self._collection.save(doc.document)
def store_child(self, child_id, parent_id):
self._collection.update({'_id': parent_id}, {'$push': {'contents':
child_id}})
<mask token>
def load_one_item(self, item_id):
return Record.from_document(self._collection.find_one(item_id))
def load_many_items(self, item_ids):
query = {'_id': {'$in': item_ids}}
results = dict((d['_id'], Record.from_document(d)) for d in self.
_collection.find(query))
return (results[i] for i in item_ids)
| <mask token>
class MongoStorage(object):
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, connection):
self._connection = connection
self._collection = connection.objects
self._roots = connection.roots
root_doc = self._roots.find_one()
if root_doc is None:
self._root_id = self._roots.save({'list': []})
else:
self._root_id = root_doc['_id']
def add_to_roots(self, container_id):
self._roots.update({'_id': self._root_id}, {'$push': {'list':
container_id}})
def store_new_item(self, doc):
"""Save the new document."""
self._collection.save(doc.document)
def store_child(self, child_id, parent_id):
self._collection.update({'_id': parent_id}, {'$push': {'contents':
child_id}})
def get_root_ids(self):
return self._roots.find_one(self._root_id)['list']
def load_one_item(self, item_id):
return Record.from_document(self._collection.find_one(item_id))
def load_many_items(self, item_ids):
query = {'_id': {'$in': item_ids}}
results = dict((d['_id'], Record.from_document(d)) for d in self.
_collection.find(query))
return (results[i] for i in item_ids)
| <mask token>
class MongoStorage(object):
_collection = None
_connection = None
_root_id = None
_roots = None
def __init__(self, connection):
self._connection = connection
self._collection = connection.objects
self._roots = connection.roots
root_doc = self._roots.find_one()
if root_doc is None:
self._root_id = self._roots.save({'list': []})
else:
self._root_id = root_doc['_id']
def add_to_roots(self, container_id):
self._roots.update({'_id': self._root_id}, {'$push': {'list':
container_id}})
def store_new_item(self, doc):
"""Save the new document."""
self._collection.save(doc.document)
def store_child(self, child_id, parent_id):
self._collection.update({'_id': parent_id}, {'$push': {'contents':
child_id}})
def get_root_ids(self):
return self._roots.find_one(self._root_id)['list']
def load_one_item(self, item_id):
return Record.from_document(self._collection.find_one(item_id))
def load_many_items(self, item_ids):
query = {'_id': {'$in': item_ids}}
results = dict((d['_id'], Record.from_document(d)) for d in self.
_collection.find(query))
return (results[i] for i in item_ids)
| from yama.record import Record
class MongoStorage(object):
_collection = None
_connection = None
_root_id = None
_roots = None
def __init__(self, connection):
self._connection = connection
self._collection = connection.objects
self._roots = connection.roots
root_doc = self._roots.find_one()
if root_doc is None:
self._root_id = self._roots.save({'list': []})
else:
self._root_id = root_doc['_id']
def add_to_roots(self, container_id):
self._roots.update({'_id': self._root_id}, {'$push': {'list':
container_id}})
def store_new_item(self, doc):
"""Save the new document."""
self._collection.save(doc.document)
def store_child(self, child_id, parent_id):
self._collection.update({'_id': parent_id}, {'$push': {'contents':
child_id}})
def get_root_ids(self):
return self._roots.find_one(self._root_id)['list']
def load_one_item(self, item_id):
return Record.from_document(self._collection.find_one(item_id))
def load_many_items(self, item_ids):
query = {'_id': {'$in': item_ids}}
results = dict((d['_id'], Record.from_document(d)) for d in self.
_collection.find(query))
return (results[i] for i in item_ids)
| null | [
7,
8,
9,
10
] |
1,270 | ca5057a5fdfef0edf4cf0c3ff3e2a371907ca4ee | <mask token>
| <mask token>
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
<mask token>
| <mask token>
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
if __name__ == '__main__':
main()
| import tkinter as tk
import tkinter.ttk as ttk
import GUIForm
import sys
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
if __name__ == '__main__':
main()
| import tkinter as tk
import tkinter.ttk as ttk
import GUIForm
import sys
def main():
global window
global _form
print("You are using Python {}.{}.{}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro))
window=tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
if __name__ == "__main__":
main() | [
0,
1,
2,
3,
4
] |
1,271 | 11a7ebac3dad1f91a6d46b62f557b51ded8e3d7a | <mask token>
| def euclidean(p, q):
sumSq = 0.0
for i in range(len(p)):
sumSq += (p[i] - q[i]) ** 2
return sumSq ** 0.5
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ユークリッド距離
# http://en.wikipedia.org/wiki/Euclidean_space
# 多次元空間中での 2 点間の距離を探索する
def euclidean(p,q):
sumSq=0.0
# 差の平方を加算
for i in range(len(p)):
sumSq+=(p[i]-q[i])**2
# 平方根
return (sumSq**0.5)
#print euclidean([3,4,5],[4,5,6])
| null | null | [
0,
1,
2
] |
1,272 | ea8676a4c55bbe0ae2ff8abf924accfc0bd8f661 | <mask token>
def version_100_iq(limit):
nums = []
for x in range(2, limit):
facs = 0
n = x
for p in primes:
if n % p == 0:
facs += 1
while n % p == 0:
n //= p
if n == 1 or facs >= 4:
break
if facs >= 4:
nums.append(x)
return set(nums)
def version_1(limit):
def search(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return 0
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
not_used = prod % primes[i] != 0
count = num_distinct >= 4 and not not_used
count += search(prod * primes[i], i, num_distinct + not_used)
count += search(prod, i + 1, num_distinct)
return count
return search(1, 0, 0)
def version_2(limit):
def search(prod, i, num_distinct):
if prod >= limit:
return
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
if i >= len(primes):
return
search(prod * primes[i], i + 1, num_distinct + 1)
search(prod, i + 1, num_distinct)
return search(1, 0, 0)
def find_prods_by_num_distinct_primes(limit, primes):
prods_by_num_distinct = [set() for _ in range(5)]
prods_by_num_distinct[0] |= {1}
def add_prods(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return
prods_by_num_distinct[min(num_distinct, 4)].add(prod)
add_prods(prod * primes[i], i + 1, num_distinct + 1)
add_prods(prod, i + 1, num_distinct)
add_prods(1, 0, 0)
return [sorted(s) for s in prods_by_num_distinct]
<mask token>
| <mask token>
start_time()
<mask token>
print('limit=', limit)
<mask token>
def version_100_iq(limit):
nums = []
for x in range(2, limit):
facs = 0
n = x
for p in primes:
if n % p == 0:
facs += 1
while n % p == 0:
n //= p
if n == 1 or facs >= 4:
break
if facs >= 4:
nums.append(x)
return set(nums)
def version_1(limit):
def search(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return 0
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
not_used = prod % primes[i] != 0
count = num_distinct >= 4 and not not_used
count += search(prod * primes[i], i, num_distinct + not_used)
count += search(prod, i + 1, num_distinct)
return count
return search(1, 0, 0)
def version_2(limit):
def search(prod, i, num_distinct):
if prod >= limit:
return
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
if i >= len(primes):
return
search(prod * primes[i], i + 1, num_distinct + 1)
search(prod, i + 1, num_distinct)
return search(1, 0, 0)
def find_prods_by_num_distinct_primes(limit, primes):
prods_by_num_distinct = [set() for _ in range(5)]
prods_by_num_distinct[0] |= {1}
def add_prods(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return
prods_by_num_distinct[min(num_distinct, 4)].add(prod)
add_prods(prod * primes[i], i + 1, num_distinct + 1)
add_prods(prod, i + 1, num_distinct)
add_prods(1, 0, 0)
return [sorted(s) for s in prods_by_num_distinct]
version_2(limit)
<mask token>
for n in sorted(v1):
for mult in range(1, 401):
if mult * n >= limit:
break
if mult in pset:
continue
if n * mult in res:
print(n, mult, n * mult)
res.add(n * mult)
count += 1
else:
print('not enough huh...', n)
count += (limit - 100 * n) // n
print(len(res))
print('Solution:', count)
<mask token>
print('100 IQ version:', len(iq_100))
end_time()
| <mask token>
start_time()
primes = read_primes(100)
<mask token>
limit = 43268
print('limit=', limit)
v1 = set()
v2 = set()
def version_100_iq(limit):
nums = []
for x in range(2, limit):
facs = 0
n = x
for p in primes:
if n % p == 0:
facs += 1
while n % p == 0:
n //= p
if n == 1 or facs >= 4:
break
if facs >= 4:
nums.append(x)
return set(nums)
def version_1(limit):
def search(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return 0
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
not_used = prod % primes[i] != 0
count = num_distinct >= 4 and not not_used
count += search(prod * primes[i], i, num_distinct + not_used)
count += search(prod, i + 1, num_distinct)
return count
return search(1, 0, 0)
def version_2(limit):
def search(prod, i, num_distinct):
if prod >= limit:
return
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
if i >= len(primes):
return
search(prod * primes[i], i + 1, num_distinct + 1)
search(prod, i + 1, num_distinct)
return search(1, 0, 0)
def find_prods_by_num_distinct_primes(limit, primes):
prods_by_num_distinct = [set() for _ in range(5)]
prods_by_num_distinct[0] |= {1}
def add_prods(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return
prods_by_num_distinct[min(num_distinct, 4)].add(prod)
add_prods(prod * primes[i], i + 1, num_distinct + 1)
add_prods(prod, i + 1, num_distinct)
add_prods(1, 0, 0)
return [sorted(s) for s in prods_by_num_distinct]
version_2(limit)
pset = set(primes)
res = set()
count = 0
for n in sorted(v1):
for mult in range(1, 401):
if mult * n >= limit:
break
if mult in pset:
continue
if n * mult in res:
print(n, mult, n * mult)
res.add(n * mult)
count += 1
else:
print('not enough huh...', n)
count += (limit - 100 * n) // n
print(len(res))
print('Solution:', count)
iq_100 = version_100_iq(limit)
print('100 IQ version:', len(iq_100))
end_time()
| from lib.utility import start_time, end_time
from lib.prime import read_primes
from bisect import bisect_left
start_time()
primes = read_primes(100)
import random
limit = 43268
print('limit=', limit)
v1 = set()
v2 = set()
def version_100_iq(limit):
nums = []
for x in range(2, limit):
facs = 0
n = x
for p in primes:
if n % p == 0:
facs += 1
while n % p == 0:
n //= p
if n == 1 or facs >= 4:
break
if facs >= 4:
nums.append(x)
return set(nums)
def version_1(limit):
def search(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return 0
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
not_used = prod % primes[i] != 0
count = num_distinct >= 4 and not not_used
count += search(prod * primes[i], i, num_distinct + not_used)
count += search(prod, i + 1, num_distinct)
return count
return search(1, 0, 0)
def version_2(limit):
def search(prod, i, num_distinct):
if prod >= limit:
return
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
if i >= len(primes):
return
search(prod * primes[i], i + 1, num_distinct + 1)
search(prod, i + 1, num_distinct)
return search(1, 0, 0)
def find_prods_by_num_distinct_primes(limit, primes):
prods_by_num_distinct = [set() for _ in range(5)]
prods_by_num_distinct[0] |= {1}
def add_prods(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return
prods_by_num_distinct[min(num_distinct, 4)].add(prod)
add_prods(prod * primes[i], i + 1, num_distinct + 1)
add_prods(prod, i + 1, num_distinct)
add_prods(1, 0, 0)
return [sorted(s) for s in prods_by_num_distinct]
version_2(limit)
pset = set(primes)
res = set()
count = 0
for n in sorted(v1):
for mult in range(1, 401):
if mult * n >= limit:
break
if mult in pset:
continue
if n * mult in res:
print(n, mult, n * mult)
res.add(n * mult)
count += 1
else:
print('not enough huh...', n)
count += (limit - 100 * n) // n
print(len(res))
print('Solution:', count)
iq_100 = version_100_iq(limit)
print('100 IQ version:', len(iq_100))
end_time()
| from lib.utility import start_time, end_time
from lib.prime import read_primes
from bisect import bisect_left
start_time()
primes = read_primes(100)
# limit = 10 ** 16
import random
# limit = random.randint(1000, 10 ** 5)
limit = 43268
# limit = 10 ** 16
print('limit=', limit)
v1 = set()
v2 = set()
def version_100_iq(limit):
nums = []
for x in range(2, limit):
facs = 0
n = x
for p in primes:
if n % p == 0:
facs += 1
while n % p == 0:
n //= p
if n == 1 or facs >= 4:
break
if facs >= 4:
nums.append(x)
return set(nums)
def version_1(limit):
def search(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return 0
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
not_used = (prod % primes[i] != 0)
count = (num_distinct >= 4) and not not_used
count += search(prod * primes[i], i, num_distinct + not_used)
count += search(prod, i + 1, num_distinct)
return count
return search(1, 0, 0)
def version_2(limit):
def search(prod, i, num_distinct):
if prod >= limit:
return
if prod not in v1 and num_distinct >= 4:
v1.add(prod)
if i >= len(primes):
return
search(prod * primes[i], i + 1, num_distinct + 1)
search(prod, i + 1, num_distinct)
return search(1, 0, 0)
def find_prods_by_num_distinct_primes(limit, primes):
prods_by_num_distinct = [set() for _ in range(5)]
prods_by_num_distinct[0] |= {1}
def add_prods(prod, i, num_distinct):
if prod >= limit or i >= len(primes):
return
# not_used = (prod % primes[i] != 0)
# if not not_used:
prods_by_num_distinct[min(num_distinct, 4)].add(prod)
add_prods(prod * primes[i], i + 1, num_distinct + 1)
add_prods(prod, i + 1, num_distinct)
add_prods(1, 0, 0)
return [sorted(s) for s in prods_by_num_distinct]
version_2(limit)
pset = set(primes)
res = set()
count = 0
for n in sorted(v1):
for mult in range(1, 401):
if mult * n >= limit:
break
# if n % mult != 0 and mult in pset:
if mult in pset:
# assert(n * mult in v1), (n, mult)
continue
# print(n, mult)
if n * mult in res:
print(n, mult, n * mult)
res.add(n * mult)
count += 1
else:
print('not enough huh...', n)
count += (limit - 100*n) // n
print(len(res))
# n = 7 # a splitting point that seems to be the fastest
# lo = find_prods_by_num_distinct_primes(limit, primes[:n])
# hi = find_prods_by_num_distinct_primes(limit, primes[n:])
# max_sol = 0
# count = 0
# for lo_num_distinct_primes in range(0, 5):
# for prod in lo[lo_num_distinct_primes]:
# for hi_num_distinct_primes in range(4 - lo_num_distinct_primes, 5):
# # count += bisect_left(hi[hi_num_distinct_primes], limit / prod)
# for v in hi[hi_num_distinct_primes]:
# if v * prod < limit:
# count += 1
# # count += (limit - 1) // (v * prod)
print('Solution:', count)
iq_100 = version_100_iq(limit)
print('100 IQ version:', len(iq_100))
# if count != len(iq_100):
# print(iq_100 - v1)
# assert count == len(iq_100)
end_time()
| [
4,
5,
6,
7,
8
] |
1,273 | fa081ccd8081f5c3319f482b7d8abd7415d8e757 | '''
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Note: A leaf is a node with no children.
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
depth = []
def maxDepth_helper(self, root, cur_d):
if root.left is None and root.right is None:
self.depth.append(cur_d)
return
elif root.left is not None and root.right is None:
            self.maxDepth_helper(root.left, cur_d + 1)
        elif root.right is not None and root.left is None:
            self.maxDepth_helper(root.right, cur_d + 1)
        else:
            self.maxDepth_helper(root.left, cur_d + 1)
            self.maxDepth_helper(root.right, cur_d + 1)
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        if root is None:
            return 0
        self.depth = []  # reset so repeated calls don't accumulate results
        self.maxDepth_helper(root, 1)  # start at 1 so the count includes the root node
        return max(self.depth)
| null | null | null | null | [
0
] |
1,274 | af442d4a78930a0ebcd85a1cdfe4aa86461be5c1 | <mask token>
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError(
'New password must be different than the old password')
if user.first_name != '' and user.first_name.lower(
) in new_password.lower(
) or user.last_name != '' and user.last_name.lower(
) in new_password.lower():
raise forms.ValidationError(
'You cannot use personal information in your password')
if new_password.isupper() or new_password.islower():
raise forms.ValidationError(
'Password must contain uppercase and lowercase letters')
if re.match('^[a-zA-Z0-9]*$', new_password):
raise forms.ValidationError(
'Password must contain a special character')
return self.cleaned_data
| <mask token>
class EditProfileModelForm(forms.ModelForm):
class Meta:
model = Account
fields = ['first_name', 'last_name', 'dob', 'email',
'email_confirmation', 'bio', 'avatar']
<mask token>
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError(
'New password must be different than the old password')
if user.first_name != '' and user.first_name.lower(
) in new_password.lower(
) or user.last_name != '' and user.last_name.lower(
) in new_password.lower():
raise forms.ValidationError(
'You cannot use personal information in your password')
if new_password.isupper() or new_password.islower():
raise forms.ValidationError(
'Password must contain uppercase and lowercase letters')
if re.match('^[a-zA-Z0-9]*$', new_password):
raise forms.ValidationError(
'Password must contain a special character')
return self.cleaned_data
| <mask token>
class EditProfileModelForm(forms.ModelForm):
class Meta:
model = Account
fields = ['first_name', 'last_name', 'dob', 'email',
'email_confirmation', 'bio', 'avatar']
def clean(self, *args, **kwargs):
cleaned_data = super(EditProfileModelForm, self).clean()
email = self.cleaned_data.get('email')
email_confirmation = self.cleaned_data.get('email_confirmation')
if email and email_confirmation and email != email_confirmation:
raise forms.ValidationError('Emails do not match')
return cleaned_data
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError(
'New password must be different than the old password')
if user.first_name != '' and user.first_name.lower(
) in new_password.lower(
) or user.last_name != '' and user.last_name.lower(
) in new_password.lower():
raise forms.ValidationError(
'You cannot use personal information in your password')
if new_password.isupper() or new_password.islower():
raise forms.ValidationError(
'Password must contain uppercase and lowercase letters')
if re.match('^[a-zA-Z0-9]*$', new_password):
raise forms.ValidationError(
'Password must contain a special character')
return self.cleaned_data
| import re
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.password_validation import validate_password
from .models import Account
class EditProfileModelForm(forms.ModelForm):
class Meta:
model = Account
fields = ['first_name', 'last_name', 'dob', 'email',
'email_confirmation', 'bio', 'avatar']
def clean(self, *args, **kwargs):
cleaned_data = super(EditProfileModelForm, self).clean()
email = self.cleaned_data.get('email')
email_confirmation = self.cleaned_data.get('email_confirmation')
if email and email_confirmation and email != email_confirmation:
raise forms.ValidationError('Emails do not match')
return cleaned_data
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError(
'New password must be different than the old password')
if user.first_name != '' and user.first_name.lower(
) in new_password.lower(
) or user.last_name != '' and user.last_name.lower(
) in new_password.lower():
raise forms.ValidationError(
'You cannot use personal information in your password')
if new_password.isupper() or new_password.islower():
raise forms.ValidationError(
'Password must contain uppercase and lowercase letters')
if re.match('^[a-zA-Z0-9]*$', new_password):
raise forms.ValidationError(
'Password must contain a special character')
return self.cleaned_data
| import re
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.password_validation import validate_password
from .models import Account
class EditProfileModelForm(forms.ModelForm):
class Meta:
model = Account
fields = ['first_name', 'last_name', 'dob', 'email', 'email_confirmation', 'bio', 'avatar']
def clean(self, *args, **kwargs):
cleaned_data = super(EditProfileModelForm, self).clean()
email = self.cleaned_data.get('email')
email_confirmation = self.cleaned_data.get('email_confirmation')
if email and email_confirmation and email != email_confirmation:
raise forms.ValidationError("Emails do not match")
return cleaned_data
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError("New password must be different than the old password")
if (user.first_name != "" and user.first_name.lower() in new_password.lower()
or user.last_name != "" and user.last_name.lower() in new_password.lower()):
raise forms.ValidationError("You cannot use personal information in your password")
if new_password.isupper() or new_password.islower():
raise forms.ValidationError("Password must contain uppercase and lowercase letters")
if re.match("^[a-zA-Z0-9]*$", new_password):
raise forms.ValidationError("Password must contain a special character")
return self.cleaned_data
| [
3,
4,
5,
6,
7
] |
1,275 | 07a546928df1acfedf7a7735dc813de9da8373e0 | <mask token>
| <mask token>
print(todayFormatted)
<mask token>
os.chdir(basepath)
if not os.path.isfile(filename):
print('File does not exist.')
else:
with open(filename) as f:
content = f.read().splitlines()
def parse():
for line in content:
if line[40:60] != ' ':
os.chdir(type1_path)
open('Type1Results.txt', 'a').write(line + '\n')
elif line[40:60] == ' ':
os.chdir(type2_path)
open('Type2Results.txt', 'a').write(line + '\n')
parse()
| <mask token>
today = date.today()
todayFormatted = today.strftime('%m%d%Y')
print(todayFormatted)
basefilename = 'PartOfFileNameYouAreLookingFor'
basepath = '\\\\Test/Path/Base/'
type1_path = '\\\\Test/Path/Base/Type1'
type2_path = '\\\\Test/Path/Base/Type2'
filename = basefilename + todayFormatted + '.txt'
os.chdir(basepath)
if not os.path.isfile(filename):
print('File does not exist.')
else:
with open(filename) as f:
content = f.read().splitlines()
def parse():
for line in content:
if line[40:60] != ' ':
os.chdir(type1_path)
open('Type1Results.txt', 'a').write(line + '\n')
elif line[40:60] == ' ':
os.chdir(type2_path)
open('Type2Results.txt', 'a').write(line + '\n')
parse()
| import os.path
from datetime import date
today = date.today()
todayFormatted = today.strftime('%m%d%Y')
print(todayFormatted)
basefilename = 'PartOfFileNameYouAreLookingFor'
basepath = '\\\\Test/Path/Base/'
type1_path = '\\\\Test/Path/Base/Type1'
type2_path = '\\\\Test/Path/Base/Type2'
filename = basefilename + todayFormatted + '.txt'
os.chdir(basepath)
if not os.path.isfile(filename):
print('File does not exist.')
else:
with open(filename) as f:
content = f.read().splitlines()
def parse():
for line in content:
if line[40:60] != ' ':
os.chdir(type1_path)
open('Type1Results.txt', 'a').write(line + '\n')
elif line[40:60] == ' ':
os.chdir(type2_path)
open('Type2Results.txt', 'a').write(line + '\n')
parse()
| ##This script looks at a path for a dated file, then parses it by row into two different files/folders based on fields being blank within each row.
import os.path
from datetime import date
##sets date variables/format
today = date.today()
todayFormatted = today.strftime("%m%d%Y")
print(todayFormatted)
##Sets variable for the base file name
basefilename = "PartOfFileNameYouAreLookingFor"
basepath = '\\\\Test/Path/Base/'
type1_path = '\\\\Test/Path/Base/Type1'
type2_path = '\\\\Test/Path/Base/Type2'
filename = basefilename + todayFormatted + '.txt'
os.chdir(basepath)
if not os.path.isfile(filename):
print('File does not exist.')
else:
with open(filename) as f:
content = f.read().splitlines()
def parse():
for line in content:
if line[40:60] != " ": ##This usecase looks for a specific field/position in a file row to be blank
## print(line[40:60])
os.chdir(type1_path)
open('Type1Results.txt', 'a').write(line + '\n')
elif line[40:60] == " ":
os.chdir(type2_path)
open('Type2Results.txt', 'a').write(line + '\n')
parse()
| [
0,
2,
3,
4,
5
] |
1,276 | bd00644b9cf019fe8c86d52494389b7f0f03d3c3 | <mask token>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
<mask token>
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
<mask token>
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
| <mask token>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
<mask token>
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
| <mask token>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
| from __future__ import print_function, division, absolute_import
import pytest
import os
from flask import template_rendered
from flipper.app import create_app
from contextlib import contextmanager
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
| # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-08-16 11:43:42
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-08-16 11:58:06
from __future__ import print_function, division, absolute_import
import pytest
import os
from flask import template_rendered
from flipper.app import create_app
from contextlib import contextmanager
@contextmanager
def captured_templates(app):
''' Records which templates are used '''
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
''' Fixture that returns which jinja template used '''
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
''' Flask application '''
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
''' Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
'''
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
''' Fixture to create an app with a test Flask base url
Returns the client fixture
'''
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
# global releases to loop over
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
''' Fixture to monkeypatch the flipper release environment variable '''
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
| [
5,
6,
7,
8,
9
] |
1,277 | 34dd6966a971e3d32e82a17cd08c3b66bb88163b | <mask token>
def showMenu():
print('---Please Choose Menu---')
print('1. Vat7')
print('2. Calculation')
print('3. Vat Calulation')
return menuSelect()
<mask token>
def priceResult():
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
return vat7(price1 + price2)
<mask token>
| def login():
usernameInput = input('Username : ')
passwordInput = input('Password : ')
if usernameInput == 'admin' and passwordInput == '1234':
return showMenu()
else:
print('User or Password Wrong.')
return login()
def showMenu():
print('---Please Choose Menu---')
print('1. Vat7')
print('2. Calculation')
print('3. Vat Calulation')
return menuSelect()
def menuSelect():
usernameSelect1 = int(input('เลือกเมนู '))
if usernameSelect1 == 1:
price = int(input('Price : '))
vat = 7
result = price + price * vat / 100
print('ราคารวม Vat7 %', result)
return menuSelect()
elif usernameSelect1 == 2:
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
sum = price1 + price2
print('ราคารวม :', sum)
return menuSelect()
elif usernameSelect1 == 3:
return priceResult()
<mask token>
def priceResult():
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
return vat7(price1 + price2)
<mask token>
| def login():
usernameInput = input('Username : ')
passwordInput = input('Password : ')
if usernameInput == 'admin' and passwordInput == '1234':
return showMenu()
else:
print('User or Password Wrong.')
return login()
def showMenu():
print('---Please Choose Menu---')
print('1. Vat7')
print('2. Calculation')
print('3. Vat Calulation')
return menuSelect()
def menuSelect():
usernameSelect1 = int(input('เลือกเมนู '))
if usernameSelect1 == 1:
price = int(input('Price : '))
vat = 7
result = price + price * vat / 100
print('ราคารวม Vat7 %', result)
return menuSelect()
elif usernameSelect1 == 2:
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
sum = price1 + price2
print('ราคารวม :', sum)
return menuSelect()
elif usernameSelect1 == 3:
return priceResult()
def vat7(totalPrice):
vat = 7
result = totalPrice + totalPrice * vat / 100
return result
def priceResult():
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
return vat7(price1 + price2)
<mask token>
| def login():
usernameInput = input('Username : ')
passwordInput = input('Password : ')
if usernameInput == 'admin' and passwordInput == '1234':
return showMenu()
else:
print('User or Password Wrong.')
return login()
def showMenu():
print('---Please Choose Menu---')
print('1. Vat7')
print('2. Calculation')
print('3. Vat Calulation')
return menuSelect()
def menuSelect():
usernameSelect1 = int(input('เลือกเมนู '))
if usernameSelect1 == 1:
price = int(input('Price : '))
vat = 7
result = price + price * vat / 100
print('ราคารวม Vat7 %', result)
return menuSelect()
elif usernameSelect1 == 2:
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
sum = price1 + price2
print('ราคารวม :', sum)
return menuSelect()
elif usernameSelect1 == 3:
return priceResult()
def vat7(totalPrice):
vat = 7
result = totalPrice + totalPrice * vat / 100
return result
def priceResult():
price1 = int(input('ราคาชิ้นที่ 1 : '))
price2 = int(input('ราคาชิ้นที่ 2 : '))
return vat7(price1 + price2)
print(login())
| def login():
usernameInput = input("Username : ")
passwordInput = input("Password : ")
if usernameInput == "admin" and passwordInput == "1234":
return (showMenu())
else:
print("User or Password Wrong.")
return login()
def showMenu():
print("---Please Choose Menu---")
print("1. Vat7")
print("2. Calculation")
print("3. Vat Calulation")
return menuSelect()
def menuSelect():
usernameSelect1 = int(input("เลือกเมนู "))
if usernameSelect1 == 1:
price = int(input("Price : "))
vat = 7
result = price + (price * vat / 100)
print("ราคารวม Vat7 %",result)
return menuSelect()
elif usernameSelect1 == 2:
price1 = int(input("ราคาชิ้นที่ 1 : "))
price2 = int(input("ราคาชิ้นที่ 2 : "))
sum = price1 + price2
print("ราคารวม :",sum)
return menuSelect()
elif usernameSelect1 == 3:
return (priceResult())
def vat7(totalPrice):
vat = 7
result = totalPrice + (totalPrice * vat / 100)
return result
def priceResult():
price1 = int(input("ราคาชิ้นที่ 1 : "))
price2 = int(input("ราคาชิ้นที่ 2 : "))
return vat7(price1+price2)
print(login())
| [
2,
4,
5,
6,
7
] |
1,278 | 5461d50d3c06bc4276044cc77bd804f6e7c16b3b | <mask token>
class FileStorage:
<mask token>
<mask token>
<mask token>
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
<mask token>
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
<mask token>
| <mask token>
class FileStorage:
<mask token>
<mask token>
<mask token>
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
| <mask token>
class FileStorage:
"""FileStorage class"""
__file_path = 'file.json'
__objects = {}
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
| <mask token>
import json
from models.base_model import BaseModel
import models
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
class FileStorage:
"""FileStorage class"""
__file_path = 'file.json'
__objects = {}
def all(self):
"""
Return:
the dictionary __objects
"""
return self.__objects
def new(self, obj):
"""
sets in objects with key classname.id
Args:
object
"""
self.__objects['{}.{}'.format(obj.__class__.__name__, obj.id)] = obj
def save(self):
"""
serializes __objects to JSON file
"""
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
"""
deserializes the JSON file
"""
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
| #!/usr/bin/python3
''' FileStorage module '''
import json
from models.base_model import BaseModel
import models
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
class FileStorage:
'''FileStorage class'''
__file_path = 'file.json'
__objects = {}
def all(self):
'''
Return:
the dictionary __objects
'''
return self.__objects
def new(self, obj):
'''
sets in objects with key classname.id
Args:
object
'''
self.__objects["{}.{}".format(obj.__class__.__name__, obj.id)] = obj
def save(self):
'''
serializes __objects to JSON file
'''
newdict = {}
with open(self.__file_path, mode='w+', encoding='utf-8') as f:
for k, v in self.__objects.items():
newdict[k] = v.to_dict()
json.dump(newdict, f)
def reload(self):
'''
deserializes the JSON file
'''
try:
with open(self.__file_path, mode='r', encoding='utf-8') as f:
newobjects = json.load(f)
for k, v in newobjects.items():
reloadedobj = eval('{}(**v)'.format(v['__class__']))
self.__objects[k] = reloadedobj
except IOError:
pass
| [
3,
5,
7,
8,
9
] |
1,279 | 8b671404228642f7ef96844c33ac3cee402bdb19 | import os, subprocess, time
from os.path import isfile, join
import shutil # to move files from af folder to another
import math
def GetListFile(PathFile, FileExtension):
return [os.path.splitext(f)[0] for f in os.listdir(PathFile) if isfile(join(PathFile, f)) and os.path.splitext(f)[1] == '.' + FileExtension]
def openfile(Path):
fileIn = open(Path, "r")
lines = fileIn.readlines()
fileIn.close()
return lines
import re
def parse_Ct_Structure(Path):
lines = openfile(Path)
# Get the initial position of the read and it Series ||||| with mutations
# replace one space in case of multi spaces re.sub( '\s+', ' ', mystring ).strip()
#print Path
#print [int(re.sub( '\s+', ' ', elem ).strip().split(' ')[4]) for elem in lines[1:]]
return [int(re.sub( '\s+', ' ', elem ).strip().split(' ')[4]) for elem in lines[1:]]
def parse_SHAPE(Path):
lines = openfile(Path)
lista=[]
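    # positions with missing or zero reactivity are stored as the sentinel -10 (skipped downstream)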
# Get the initial position of the read and it Series ||||| with mutations
# replace one space in case of multi spaces re.sub( '\s+', ' ', mystring ).strip()
Intermediate=[re.sub( '\s+', '\t', elem ).strip().split('\t') for elem in lines]
for elem in Intermediate:
if len(elem)>2 and float(elem[2])!=0:
lista.append(float(elem[2]))
else:
lista.append(-10)
return lista
def Pairing_status(structure):# a line in ct format with a number for the partner if paired and 0 if not
status=[]
for value in structure:
#print value,'lol'
if value== '(' or value== ')':
status.append('P')
if value =='.':
status.append('Un')
if value=='x':
status.append('PK')
return status
def plot2D(x,y,titre):
import matplotlib.pyplot as plt
import numpy as np
plt.plot(x,y,'r.')
plt.xlabel('shape')
plt.ylabel('unpaired probability')
plt.title(titre)
# fitting functions
plt.show()
def distribution_plots(Y,titre):
# plot the x distribution
import matplotlib.pyplot as plt
import numpy as np
'''
d = {x: Y.count(x) for x in Y}
print d
plt.plot(d.keys(),d.values(),'.')
'''
plt.hist(Y, bins=10, color='green')
plt.ylabel('Frequencies')
plt.title(titre)
# fitting functions
#plt.show()
plt.savefig(titre)
def GetbasepairsProb(path_Fasta, fil, FileExtensionFasta,Path_dot_plot):
listProbaPairs=[]
SingleProba=[]
FastaPath=os.path.join(path_Fasta, fil + '.' + FileExtensionFasta)
rna =openfile(FastaPath)[1]
#print rna
os.system("RNAfold -p -d2 --noLP <" + FastaPath+ ">output.txt")
PSPath=os.path.join( Path_dot_plot,fil+"_dp.ps")
shutil.move(fil+"_dp.ps",PSPath)
os.remove(fil+"_ss.ps")
#print fil,'rr'
bpm = loadDotPlotPS(PSPath)
dp = DotPlot(rna, bpm)
for i in range(len(rna)):
for j in range(i, len(rna)):
if dp.getBPProb(i, j) > 0:# get only non null probabilities
listProbaPairs.append((i,j,dp.getBPProb(i, j)))
SingleProba=dp.getUnpairedProbs()
return listProbaPairs, SingleProba
#!!!!!!!!!!!!!!!!!!!!!!!!!!!! loadDotPlotPS(path)
def loadDotPlotPS(path):
res = {}
outTmp = open(path)
for l in outTmp:
data = l[:-1].split()
if len(data) == 4 and data[3]=="ubox":
i = int(data[0])-1
j = int(data[1])-1
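            # 'ubox' entries in the dot plot store sqrt(p), so square to recover the pair probability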
p = math.pow(float(data[2]),2.)
res[i,j] = p
outTmp.close()
return res
def parse_rdat(Path):
lines = openfile(Path)
RNA=[]
seq=dict()
struct=dict()
reactivity=dict()
for line in lines:
#print line.split('\t')[0]
if line.split('\t')[0]== 'NAME':
RNA.append(line.split('\t')[1][:-1])
Id= line.split('\t')[1][:-1]
#print 'id',Id
if line.split('\t')[0]=='SEQUENCE':
seq[Id]= line.split('\t')[1][:-2]
if line.split('\t')[0]=='STRUCTURE':
struct[Id]=line.split('\t')[1][:-2]
if line.split('\t')[0]=='REACTIVITY:1':
#print line.split('\t')[1:-1]
reactivity[Id]=line.split('\t')[1:-1]
return RNA,seq, struct,reactivity
def create_fasta_shapeFiles(RNA,seq, struct,reactivity,path_fasta,path_SHAPE):
for Id in RNA:
Outfasta=open(os.path.join(path_fasta, Id+'.fa'),'w')
OutShape=open(os.path.join(path_SHAPE, Id+'.shape'),'w')
Outfasta.write("%s \n" % (">"+ Id))
Outfasta.write("%s \n" % (seq[Id]))
Outfasta.write("%s " % (struct[Id]))
Outfasta.close()
#print Id, len(reactivity[Id]),len(seq[Id])
for i, val in enumerate(reactivity[Id][:-1]):
#print reactivity[Id]
if i <len(seq[Id])and val!=" ":
print Id,i, seq[Id][i],"FF",val
OutShape.write("%i \t %s \t %f \n"%(i+1,seq[Id][i],float(val)))
#print "done"
class DotPlot:
"""Class for holding/producing base-pair probability matrices"""
def __init__(self, rna , bpm = None):
self.rna = rna[:]
if bpm is None:
# we will avoid this case to be sure that rnafold from the min works well
self.bpm = self.runRNAFold()
else:
self.bpm = bpm
def getSeq(self):
return self.rna
def getBPProb(self,i,j):
if (i,j) in self.bpm:
return self.bpm[i,j]
else:
return 0.
def getUnpairedProbs(self):
res = [1. for i in self.rna]
for i,j in self.bpm:
res[i] -= self.bpm[i,j]
res[j] -= self.bpm[i,j]
return res
def Parsefile(Path):
fileIn = open(Path, "r")
lines = fileIn.readlines()
fileIn.close()
return lines
def parseReactivityfile(fileinput):
Reactvities=[]
lines=Parsefile(fileinput)
for it in range(len(lines)):
if (lines[it].split("\t")[2][:-1]):
Reactvities.append(lines[it].split("\t")[2][:-1])
else:
Reactvities.append(-10)
return Reactvities
if __name__ == '__main__':
####################"" To parametrize ##########################
FileExtensionshape ='shape'
react='NMIA'
path_SHAPE='SHAPE_files_NMIA'
path_Fasta = 'fasta_files'
FileExtensionFasta = 'fa'
Shape={}
States={}
BP={}
UNP={}
Quaternary = {}
tertiary = {}
Unpaired2 = {}
lisTIP = []
lisTUn = []
lisHE = []
lisHES = []
lisTIPinterne=[]
lisTIPexterne=[]
SPTIP = []
SPTUn = []
SPHE = []
SPHES = []
SPTIPinterne = []
SPTIPexterne = []
lisIP = [] # shape values for paired Positions
lisUn = [] # shape values for unpaired Positions
SPIP = [] # probability of being unpaired from McCaskill for IP category
SPUn = [] # probability of being unpaired from McCaskill for Un category
RNA=[]
struct=dict()
reactivity=dict()
list3=[]
for filz in GetListFile(path_Fasta, FileExtensionFasta):
#print reactivity
rna = Parsefile(os.path.join(path_Fasta, filz + '.' + FileExtensionFasta))[1]
structure=Parsefile(os.path.join(path_Fasta, filz + '.' + FileExtensionFasta))[2]
States[filz] = Pairing_status(structure)
reactivity[filz]=parseReactivityfile(os.path.join(path_SHAPE, filz + react+'Shape.txt'))
# Get the end-Helix positions
print "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG",filz, len(States[filz])
print States[filz][-1]
for i,elem in enumerate(States[filz]):
if elem=='Un' or elem=='PK':
list3.append(elem)
if elem=='P':
#print i, elem,i-1,i+1, States[filz][i+1]
                if i in range(1,len(rna)-3) and (States[filz][i-1]=='Un' or States[filz][i-1]=='PK' or States[filz][i+1]=='Un' or States[filz][i+1]=='PK'): # we should add the PK condition because we are looking for stacked substructures that do not take the tertiary or pseudoknot extensions into account
list3.append('HE')
else:
list3.append(elem)
cum=[]
for filz in GetListFile(path_Fasta, FileExtensionFasta):
if reactivity[filz]==[]:
print "warning!! Empty reactivity",rna
cum=cum+reactivity[filz]
Shape=cum
#print len(Shape)
lIP = [] # shape values for paired Positions
lUn = []
lHE =[]
# for structural_study
#print [elem[:-1] for elem in Shape]
for nucl,shape in zip(list3,Shape):
if shape!=-10 and nucl=='P':
print "la vaaleur", shape
lIP.append( float(shape))
#SPIP.append(UnpPb )
if shape!=-10 and nucl=='Un':
lUn.append( float(shape))
#SPUn.append(UnpPb )
if shape!=-10 and nucl=='HE':
lHE.append( float(shape))
import numpy as np
labels=["Stacked nucleotides","Unpaired nucleotides","Helix-end"]
lists= [lIP,lUn, lHE]
for (data, title) in [(lIP, "P" + str(react)), (lUn, "U"+ str(react)),(lHE,"HE"+ str(react))]:
print title ,'\n' , data
| null | null | null | null | [
0
] |
1,280 | 33aa5c5ab75a26705875b55baf61f7f996cb69cd | <mask token>
| <mask token>
urlpatterns = [path('', views.home, name='VitaminSHE-home'), path('signup/',
views.signup, name='VitaminSHE-signup'), path('login/', views.login,
name='VitaminSHE-login'), path('healthcheck/', views.healthcheck, name=
'VitaminSHE-healthcheck'), path('food/', views.food, name=
'VitaminSHE-food'), path('book/', views.book, name='VitaminSHE-book'),
path('why/', views.why, name='VitaminSHE-why')]
| from django.urls import path
from . import views
urlpatterns = [path('', views.home, name='VitaminSHE-home'), path('signup/',
views.signup, name='VitaminSHE-signup'), path('login/', views.login,
name='VitaminSHE-login'), path('healthcheck/', views.healthcheck, name=
'VitaminSHE-healthcheck'), path('food/', views.food, name=
'VitaminSHE-food'), path('book/', views.book, name='VitaminSHE-book'),
path('why/', views.why, name='VitaminSHE-why')]
| from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='VitaminSHE-home'),
path('signup/', views.signup, name='VitaminSHE-signup'),
path('login/', views.login, name='VitaminSHE-login'),
path('healthcheck/', views.healthcheck, name='VitaminSHE-healthcheck'),
path('food/', views.food, name='VitaminSHE-food'),
path('book/', views.book, name='VitaminSHE-book'),
path('why/', views.why, name='VitaminSHE-why'),
]
| null | [
0,
1,
2,
3
] |
1,281 | 6b616f5ee0a301b76ad3f7284b47f225a694d33c | <mask token>
| from plprofiler_tool import main
from plprofiler import plprofiler
| null | null | null | [
0,
1
] |
1,282 | 26ae44b5be1d78ed3fe9c858413ae47e163c5460 | <mask token>
| <mask token>
class Solution:
<mask token>
| <mask token>
class Solution:
def productExceptSelf(self, nums: List[int]) ->List[int]:
output = []
prod = 1
for num in nums:
output.append(prod)
prod *= num
prod = 1
for k in range(len(nums) - 1, -1, -1):
output[k] = output[k] * prod
prod *= nums[k]
return output
| from typing import List
<mask token>
class Solution:
def productExceptSelf(self, nums: List[int]) ->List[int]:
output = []
prod = 1
for num in nums:
output.append(prod)
prod *= num
prod = 1
for k in range(len(nums) - 1, -1, -1):
output[k] = output[k] * prod
prod *= nums[k]
return output
| from typing import List
"""
1. Generate an array containing the products of all elements to the left of current element
2. Similarly, start from the last element and generate an array containing the products to the right of each element
3. Multiply both arrays element-wise
"""
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
output = []
prod = 1
# First generate the products to the left of the current element
for num in nums:
output.append(prod)
prod *= num
prod = 1
# Now, generate and multiply the product to the right of current element
for k in range(len(nums) - 1, -1, -1):
output[k] = output[k] * prod
prod *= nums[k]
return output
| [
0,
1,
2,
3,
4
] |
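The final step in the row above documents the two-pass prefix/suffix product approach. A quick worked usage example (input chosen arbitrarily, assuming the Solution class from the row is in scope):

sol = Solution()
print(sol.productExceptSelf([1, 2, 3, 4]))  # [24, 12, 8, 6]
# Pass 1 stores the left products [1, 1, 2, 6];
# pass 2 multiplies in the right products [24, 12, 4, 1], giving the result above.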
1,283 | 284955a555ce1a727ba5041008cd0bac3c3bed49 | <mask token>
| <mask token>
class Covid(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Covid(models.Model):
states = models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
| from django.db import models
class Covid(models.Model):
states = models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
| from django.db import models
# Create your models here.
class Covid(models.Model):
states= models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
| [
0,
1,
2,
3,
4
] |
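As a brief illustration of how the model in the row above would typically be used through the Django ORM (the state name and counts are placeholders, and the model is assumed to be migrated):

Covid.objects.create(states='Kerala', affected=1200, cured=900, death=4)
worst_hit = Covid.objects.order_by('-affected').first()
total_cured = sum(c.cured for c in Covid.objects.all())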
1,284 | 3aff6bdfd7c2ffd57af7bb5d0079a8a428e02331 | <mask token>
def evaluate(sess, data, embds, model, logdir):
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
acc = metrics.accuracy_score(pred['labels'], pred['predictions'])
macro_f1 = metrics.f1_score(pred['labels'], pred['predictions'],
average='macro')
f1_0 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=0)
f1_1 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=1)
macro_precision = metrics.precision_score(pred['labels'], pred[
'predictions'], average='macro')
precision_0 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=0)
precision_1 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=1)
macro_recall = metrics.recall_score(pred['labels'], pred['predictions'],
average='macro')
recall_0 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=0)
recall_1 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=1)
return (acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1)
<mask token>
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = 'han_100d_163b_50cx_0.0001_0.5d'
with tf.Session() as sess:
model = HierarchicalAttention(num_classes=2, vocab_size=embds.shape
[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.
alphas_sent], feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.
val_max_sent_length, 1])
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred[
'probabilities'], a_word, a_sent))
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1] == x[2] and x[1] == 1]
def get_predicted_prob(x):
return x[3][x[2]]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse
=True)
print(sorted_correct[0:2])
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5])
)
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
sentence = np.array([x[0] for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
idx = list(s).index('PAD')
attention_weights_word[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2
print(np.shape(sentence))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent,
sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(attention_weights.shape[-1] * (max_word_len *
font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title('Attention')
plt.xlabel('words')
plt.ylabel('batch')
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k',
linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4,
cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.
get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = 0.0, 0.0, 0.0
else:
color = 1.0, 1.0, 1.0
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != 'PAD':
word = sentence[i, j]
else:
word = ''
fontsize = _font_size(len(word))
ax.text(x, y, word, ha='center', va='center', color=color, size
=fontsize)
idx = [(i + 0.5) for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention('attention_real_han.png', attention_weights_word, np.
array([[x] for x in attention_weights_sent]), sentence)
<mask token>
def get_confusion(data, embds):
tf.reset_default_graph()
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(num_classes=2, vocab_size=embds.shape[0
], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, 0)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
return 0
else:
return -1
labels = list(map(fn, pred['labels']))
predicts = list(map(fn, pred['predictions']))
cnf_matrix = metrics.confusion_matrix(labels, predicts)
plt.figure()
classes = ['True', 'False']
plot_confusion_matrix(cnf_matrix, classes=classes, title=
'Confusion matrix, without normalization')
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,
title='Normalized confusion matrix')
plt.show()
def t_test(data, embds):
tf.reset_default_graph()
acc_ban = []
f1_ban = []
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
for it in range(30):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = BucketizedAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(acc)
acc_ban.append(acc)
f1_ban.append(macro_f1)
tf.reset_default_graph()
acc_cnn = [0.6313328137178488, 0.6157443491816056, 0.6110678098207326,
0.6141855027279813, 0.6165237724084178, 0.627435697583788,
0.6297739672642245, 0.6102883865939205, 0.6219797349961029,
0.6157443491816056, 0.6188620420888542, 0.6087295401402962,
0.6071706936866719, 0.6118472330475448, 0.6336710833982853,
0.6243180046765393, 0.6056118472330475, 0.6180826188620421,
0.6243180046765393, 0.6180826188620421, 0.6250974279033515,
0.6180826188620421, 0.6219797349961029, 0.6056118472330475,
0.6188620420888542, 0.6235385814497272, 0.6063912704598597,
0.5962587685113017, 0.6313328137178488, 0.6149649259547935]
f1_cnn = [0.625208977558574, 0.6067531970160148, 0.6109316669026621,
0.6020553751990241, 0.6090837028412892, 0.6094950282209589,
0.6172590617767771, 0.607132008544496, 0.6080345191414308,
0.5998115849326153, 0.6085742361143607, 0.6078430656223209,
0.5935340795944845, 0.5862705332027911, 0.6173464207571212,
0.6042373835890662, 0.6010630976083375, 0.5991259035560702,
0.5946686067851712, 0.5925791031776069, 0.6052042516849045,
0.6115004325794092, 0.6152243182460431, 0.6045333820662768,
0.6009255107006212, 0.6008323601423038, 0.5949095710792511,
0.59088816113464, 0.6062203096074071, 0.6064241216914394]
print(stats.ttest_ind(acc_ban, acc_cnn, equal_var=False))
print(stats.ttest_ind(f1_ban, f1_cnn, equal_var=False))
| <mask token>
def evaluate(sess, data, embds, model, logdir):
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
acc = metrics.accuracy_score(pred['labels'], pred['predictions'])
macro_f1 = metrics.f1_score(pred['labels'], pred['predictions'],
average='macro')
f1_0 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=0)
f1_1 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=1)
macro_precision = metrics.precision_score(pred['labels'], pred[
'predictions'], average='macro')
precision_0 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=0)
precision_1 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=1)
macro_recall = metrics.recall_score(pred['labels'], pred['predictions'],
average='macro')
recall_0 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=0)
recall_1 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=1)
return (acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1)
def run_std(data, embds):
selection = get_attention_weights(data, embds)
visualize_attention(selection)
tf.reset_default_graph()
results = []
now = 'han_100d_163b_50cx_0.0001_0.5d'
result = []
for it in range(5):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = HierarchicalAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(logdir)
print(acc, ' ', macro_f1, ' ', f1_0, ' ', f1_1, ' ',
macro_precision, ' ', precision_0, ' ', precision_1, ' ',
macro_recall, ' ', recall_0, ' ', recall_1)
result.append([acc, macro_f1, f1_0, f1_1, macro_precision,
precision_0, precision_1, macro_recall, recall_0, recall_1])
result_averages = np.mean(result, axis=0)
print(result_averages)
result_stds = np.std(result, axis=0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print('averages-------')
print(results)
print('------------')
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = 'han_100d_163b_50cx_0.0001_0.5d'
with tf.Session() as sess:
model = HierarchicalAttention(num_classes=2, vocab_size=embds.shape
[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.
alphas_sent], feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.
val_max_sent_length, 1])
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred[
'probabilities'], a_word, a_sent))
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1] == x[2] and x[1] == 1]
def get_predicted_prob(x):
return x[3][x[2]]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse
=True)
print(sorted_correct[0:2])
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5])
)
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
sentence = np.array([x[0] for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
idx = list(s).index('PAD')
attention_weights_word[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2
print(np.shape(sentence))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent,
sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(attention_weights.shape[-1] * (max_word_len *
font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title('Attention')
plt.xlabel('words')
plt.ylabel('batch')
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k',
linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4,
cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.
get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = 0.0, 0.0, 0.0
else:
color = 1.0, 1.0, 1.0
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != 'PAD':
word = sentence[i, j]
else:
word = ''
fontsize = _font_size(len(word))
ax.text(x, y, word, ha='center', va='center', color=color, size
=fontsize)
idx = [(i + 0.5) for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention('attention_real_han.png', attention_weights_word, np.
array([[x] for x in attention_weights_sent]), sentence)
<mask token>
def get_confusion(data, embds):
tf.reset_default_graph()
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(num_classes=2, vocab_size=embds.shape[0
], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, 0)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
return 0
else:
return -1
labels = list(map(fn, pred['labels']))
predicts = list(map(fn, pred['predictions']))
cnf_matrix = metrics.confusion_matrix(labels, predicts)
plt.figure()
classes = ['True', 'False']
plot_confusion_matrix(cnf_matrix, classes=classes, title=
'Confusion matrix, without normalization')
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,
title='Normalized confusion matrix')
plt.show()
def t_test(data, embds):
tf.reset_default_graph()
acc_ban = []
f1_ban = []
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
for it in range(30):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = BucketizedAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(acc)
acc_ban.append(acc)
f1_ban.append(macro_f1)
tf.reset_default_graph()
acc_cnn = [0.6313328137178488, 0.6157443491816056, 0.6110678098207326,
0.6141855027279813, 0.6165237724084178, 0.627435697583788,
0.6297739672642245, 0.6102883865939205, 0.6219797349961029,
0.6157443491816056, 0.6188620420888542, 0.6087295401402962,
0.6071706936866719, 0.6118472330475448, 0.6336710833982853,
0.6243180046765393, 0.6056118472330475, 0.6180826188620421,
0.6243180046765393, 0.6180826188620421, 0.6250974279033515,
0.6180826188620421, 0.6219797349961029, 0.6056118472330475,
0.6188620420888542, 0.6235385814497272, 0.6063912704598597,
0.5962587685113017, 0.6313328137178488, 0.6149649259547935]
f1_cnn = [0.625208977558574, 0.6067531970160148, 0.6109316669026621,
0.6020553751990241, 0.6090837028412892, 0.6094950282209589,
0.6172590617767771, 0.607132008544496, 0.6080345191414308,
0.5998115849326153, 0.6085742361143607, 0.6078430656223209,
0.5935340795944845, 0.5862705332027911, 0.6173464207571212,
0.6042373835890662, 0.6010630976083375, 0.5991259035560702,
0.5946686067851712, 0.5925791031776069, 0.6052042516849045,
0.6115004325794092, 0.6152243182460431, 0.6045333820662768,
0.6009255107006212, 0.6008323601423038, 0.5949095710792511,
0.59088816113464, 0.6062203096074071, 0.6064241216914394]
print(stats.ttest_ind(acc_ban, acc_cnn, equal_var=False))
print(stats.ttest_ind(f1_ban, f1_cnn, equal_var=False))
| <mask token>
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def evaluate(sess, data, embds, model, logdir):
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
acc = metrics.accuracy_score(pred['labels'], pred['predictions'])
macro_f1 = metrics.f1_score(pred['labels'], pred['predictions'],
average='macro')
f1_0 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=0)
f1_1 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=1)
macro_precision = metrics.precision_score(pred['labels'], pred[
'predictions'], average='macro')
precision_0 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=0)
precision_1 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=1)
macro_recall = metrics.recall_score(pred['labels'], pred['predictions'],
average='macro')
recall_0 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=0)
recall_1 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=1)
return (acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1)
def run_std(data, embds):
selection = get_attention_weights(data, embds)
visualize_attention(selection)
tf.reset_default_graph()
results = []
now = 'han_100d_163b_50cx_0.0001_0.5d'
result = []
for it in range(5):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = HierarchicalAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(logdir)
print(acc, ' ', macro_f1, ' ', f1_0, ' ', f1_1, ' ',
macro_precision, ' ', precision_0, ' ', precision_1, ' ',
macro_recall, ' ', recall_0, ' ', recall_1)
result.append([acc, macro_f1, f1_0, f1_1, macro_precision,
precision_0, precision_1, macro_recall, recall_0, recall_1])
result_averages = np.mean(result, axis=0)
print(result_averages)
result_stds = np.std(result, axis=0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print('averages-------')
print(results)
print('------------')
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = 'han_100d_163b_50cx_0.0001_0.5d'
with tf.Session() as sess:
model = HierarchicalAttention(num_classes=2, vocab_size=embds.shape
[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.
alphas_sent], feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.
val_max_sent_length, 1])
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred[
'probabilities'], a_word, a_sent))
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1] == x[2] and x[1] == 1]
def get_predicted_prob(x):
return x[3][x[2]]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse
=True)
print(sorted_correct[0:2])
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5])
)
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
sentence = np.array([x[0] for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
idx = list(s).index('PAD')
attention_weights_word[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2
print(np.shape(sentence))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent,
sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(attention_weights.shape[-1] * (max_word_len *
font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title('Attention')
plt.xlabel('words')
plt.ylabel('batch')
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k',
linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4,
cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.
get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = 0.0, 0.0, 0.0
else:
color = 1.0, 1.0, 1.0
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != 'PAD':
word = sentence[i, j]
else:
word = ''
fontsize = _font_size(len(word))
ax.text(x, y, word, ha='center', va='center', color=color, size
=fontsize)
idx = [(i + 0.5) for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention('attention_real_han.png', attention_weights_word, np.
array([[x] for x in attention_weights_sent]), sentence)
def plot_confusion_matrix(cm, classes, normalize=False, title=
'Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',
color='white' if cm[i, j] > thresh else 'black')
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(num_classes=2, vocab_size=embds.shape[0
], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, 0)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
return 0
else:
return -1
labels = list(map(fn, pred['labels']))
predicts = list(map(fn, pred['predictions']))
cnf_matrix = metrics.confusion_matrix(labels, predicts)
plt.figure()
classes = ['True', 'False']
plot_confusion_matrix(cnf_matrix, classes=classes, title=
'Confusion matrix, without normalization')
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,
title='Normalized confusion matrix')
plt.show()
def t_test(data, embds):
tf.reset_default_graph()
acc_ban = []
f1_ban = []
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
for it in range(30):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = BucketizedAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(acc)
acc_ban.append(acc)
f1_ban.append(macro_f1)
tf.reset_default_graph()
acc_cnn = [0.6313328137178488, 0.6157443491816056, 0.6110678098207326,
0.6141855027279813, 0.6165237724084178, 0.627435697583788,
0.6297739672642245, 0.6102883865939205, 0.6219797349961029,
0.6157443491816056, 0.6188620420888542, 0.6087295401402962,
0.6071706936866719, 0.6118472330475448, 0.6336710833982853,
0.6243180046765393, 0.6056118472330475, 0.6180826188620421,
0.6243180046765393, 0.6180826188620421, 0.6250974279033515,
0.6180826188620421, 0.6219797349961029, 0.6056118472330475,
0.6188620420888542, 0.6235385814497272, 0.6063912704598597,
0.5962587685113017, 0.6313328137178488, 0.6149649259547935]
f1_cnn = [0.625208977558574, 0.6067531970160148, 0.6109316669026621,
0.6020553751990241, 0.6090837028412892, 0.6094950282209589,
0.6172590617767771, 0.607132008544496, 0.6080345191414308,
0.5998115849326153, 0.6085742361143607, 0.6078430656223209,
0.5935340795944845, 0.5862705332027911, 0.6173464207571212,
0.6042373835890662, 0.6010630976083375, 0.5991259035560702,
0.5946686067851712, 0.5925791031776069, 0.6052042516849045,
0.6115004325794092, 0.6152243182460431, 0.6045333820662768,
0.6009255107006212, 0.6008323601423038, 0.5949095710792511,
0.59088816113464, 0.6062203096074071, 0.6064241216914394]
print(stats.ttest_ind(acc_ban, acc_cnn, equal_var=False))
print(stats.ttest_ind(f1_ban, f1_cnn, equal_var=False))
| import tensorflow as tf
import numpy as np
from datetime import datetime
import os
from CNN import CNN
from LSTM import LSTM
from BiLSTM import BiLSTM
from SLAN import Attention
from HAN2 import HierarchicalAttention
import sklearn.metrics as metrics
import DataProcessor as dp
import matplotlib.pyplot as plt
import numpy as np
import itertools
from scipy import stats
from math import floor
from BAN import BucketizedAttention
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def evaluate(sess, data, embds, model, logdir):
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
acc = metrics.accuracy_score(pred['labels'], pred['predictions'])
macro_f1 = metrics.f1_score(pred['labels'], pred['predictions'],
average='macro')
f1_0 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=0)
f1_1 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=1)
macro_precision = metrics.precision_score(pred['labels'], pred[
'predictions'], average='macro')
precision_0 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=0)
precision_1 = metrics.precision_score(pred['labels'], pred[
'predictions'], pos_label=1)
macro_recall = metrics.recall_score(pred['labels'], pred['predictions'],
average='macro')
recall_0 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=0)
recall_1 = metrics.recall_score(pred['labels'], pred['predictions'],
pos_label=1)
return (acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1)
def run_std(data, embds):
selection = get_attention_weights(data, embds)
visualize_attention(selection)
tf.reset_default_graph()
results = []
now = 'han_100d_163b_50cx_0.0001_0.5d'
result = []
for it in range(5):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = HierarchicalAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(logdir)
print(acc, ' ', macro_f1, ' ', f1_0, ' ', f1_1, ' ',
macro_precision, ' ', precision_0, ' ', precision_1, ' ',
macro_recall, ' ', recall_0, ' ', recall_1)
result.append([acc, macro_f1, f1_0, f1_1, macro_precision,
precision_0, precision_1, macro_recall, recall_0, recall_1])
result_averages = np.mean(result, axis=0)
print(result_averages)
result_stds = np.std(result, axis=0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print('averages-------')
print(results)
print('------------')
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = 'han_100d_163b_50cx_0.0001_0.5d'
with tf.Session() as sess:
model = HierarchicalAttention(num_classes=2, vocab_size=embds.shape
[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.
alphas_sent], feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.
val_max_sent_length, 1])
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred[
'probabilities'], a_word, a_sent))
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1] == x[2] and x[1] == 1]
def get_predicted_prob(x):
return x[3][x[2]]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse
=True)
print(sorted_correct[0:2])
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5])
)
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
sentence = np.array([x[0] for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
idx = list(s).index('PAD')
attention_weights_word[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2
print(np.shape(sentence))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent,
sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(attention_weights.shape[-1] * (max_word_len *
font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title('Attention')
plt.xlabel('words')
plt.ylabel('batch')
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k',
linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4,
cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.
get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = 0.0, 0.0, 0.0
else:
color = 1.0, 1.0, 1.0
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != 'PAD':
word = sentence[i, j]
else:
word = ''
fontsize = _font_size(len(word))
ax.text(x, y, word, ha='center', va='center', color=color, size
=fontsize)
idx = [(i + 0.5) for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention('attention_real_han.png', attention_weights_word, np.
array([[x] for x in attention_weights_sent]), sentence)
def plot_confusion_matrix(cm, classes, normalize=False, title=
'Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), horizontalalignment='center',
color='white' if cm[i, j] > thresh else 'black')
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(num_classes=2, vocab_size=embds.shape[0
], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, 0)
checkpoint_dir = '{}checkpoints'.format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder:
embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths:
sent_lengths_val, model.seq_lengths: seq_lengths_val, model.
dropout_keep_prob: 1, model.max_seq_length: data.
test_max_seq_length, model.max_sent_length: data.test_max_sent_length}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
return 0
else:
return -1
labels = list(map(fn, pred['labels']))
predicts = list(map(fn, pred['predictions']))
cnf_matrix = metrics.confusion_matrix(labels, predicts)
plt.figure()
classes = ['True', 'False']
plot_confusion_matrix(cnf_matrix, classes=classes, title=
'Confusion matrix, without normalization')
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,
title='Normalized confusion matrix')
plt.show()
def t_test(data, embds):
tf.reset_default_graph()
acc_ban = []
f1_ban = []
now = 'banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop'
for it in range(30):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = BucketizedAttention(num_classes=2, vocab_size=embds.
shape[0], embedding_size=embds.shape[1])
root_logdir = 'logs'
logdir = '{}/run-{}-{}/'.format(root_logdir, now, it)
(acc, macro_f1, f1_0, f1_1, macro_precision, precision_0,
precision_1, macro_recall, recall_0, recall_1) = evaluate(sess,
data, embds, lstm, logdir)
print(acc)
acc_ban.append(acc)
f1_ban.append(macro_f1)
tf.reset_default_graph()
acc_cnn = [0.6313328137178488, 0.6157443491816056, 0.6110678098207326,
0.6141855027279813, 0.6165237724084178, 0.627435697583788,
0.6297739672642245, 0.6102883865939205, 0.6219797349961029,
0.6157443491816056, 0.6188620420888542, 0.6087295401402962,
0.6071706936866719, 0.6118472330475448, 0.6336710833982853,
0.6243180046765393, 0.6056118472330475, 0.6180826188620421,
0.6243180046765393, 0.6180826188620421, 0.6250974279033515,
0.6180826188620421, 0.6219797349961029, 0.6056118472330475,
0.6188620420888542, 0.6235385814497272, 0.6063912704598597,
0.5962587685113017, 0.6313328137178488, 0.6149649259547935]
f1_cnn = [0.625208977558574, 0.6067531970160148, 0.6109316669026621,
0.6020553751990241, 0.6090837028412892, 0.6094950282209589,
0.6172590617767771, 0.607132008544496, 0.6080345191414308,
0.5998115849326153, 0.6085742361143607, 0.6078430656223209,
0.5935340795944845, 0.5862705332027911, 0.6173464207571212,
0.6042373835890662, 0.6010630976083375, 0.5991259035560702,
0.5946686067851712, 0.5925791031776069, 0.6052042516849045,
0.6115004325794092, 0.6152243182460431, 0.6045333820662768,
0.6009255107006212, 0.6008323601423038, 0.5949095710792511,
0.59088816113464, 0.6062203096074071, 0.6064241216914394]
print(stats.ttest_ind(acc_ban, acc_cnn, equal_var=False))
print(stats.ttest_ind(f1_ban, f1_cnn, equal_var=False))
| import tensorflow as tf
import numpy as np
from datetime import datetime
import os
from CNN import CNN
from LSTM import LSTM
from BiLSTM import BiLSTM
from SLAN import Attention
from HAN2 import HierarchicalAttention
import sklearn.metrics as metrics
import DataProcessor as dp
import matplotlib.pyplot as plt
import numpy as np
import itertools
from scipy import stats
from math import floor
from BAN import BucketizedAttention
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def evaluate(sess, data, embds, model, logdir):
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
#saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_dir))
# Training model
#training_op, global_step = model.optimize()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
#print("Evaluation:")
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.test_max_seq_length,
model.max_sent_length: data.test_max_sent_length
}
pred = sess.run(predictions, feed_dict=feed_dict)
acc = metrics.accuracy_score(pred['labels'], pred['predictions'])
macro_f1 = metrics.f1_score(pred['labels'], pred['predictions'], average="macro")
f1_0 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=0)
f1_1 = metrics.f1_score(pred['labels'], pred['predictions'], pos_label=1)
macro_precision = metrics.precision_score(pred['labels'], pred['predictions'], average="macro")
precision_0 = metrics.precision_score(pred['labels'], pred['predictions'], pos_label=0)
precision_1 = metrics.precision_score(pred['labels'], pred['predictions'], pos_label=1)
macro_recall = metrics.recall_score(pred['labels'], pred['predictions'], average="macro")
recall_0 = metrics.recall_score(pred['labels'], pred['predictions'], pos_label=0)
recall_1 = metrics.recall_score(pred['labels'], pred['predictions'], pos_label=1)
return (acc, macro_f1, f1_0, f1_1, macro_precision, precision_0, precision_1, macro_recall, recall_0, recall_1)
#return (acc, macro_f1, 1, 1, macro_precision, 1, 1, macro_recall, 1, 1)
def run_std(data, embds):
selection = get_attention_weights(data, embds)
visualize_attention(selection)
tf.reset_default_graph()
results = []
now = "han_100d_163b_50cx_0.0001_0.5d"
result = []
for it in range(5):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = HierarchicalAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
acc, macro_f1, f1_0, f1_1, macro_precision, precision_0, precision_1, macro_recall, recall_0, recall_1 = evaluate(
sess, data, embds, lstm, logdir)
print(logdir)
print(acc, " ", macro_f1, " ", f1_0, " ", f1_1, " ", macro_precision, " ", precision_0, " ",
precision_1, " ", macro_recall, " ", recall_0, " ", recall_1)
result.append(
[acc, macro_f1, f1_0, f1_1, macro_precision, precision_0, precision_1, macro_recall,
recall_0, recall_1])
result_averages = np.mean(result, axis=0)
print(result_averages)
result_stds = np.std(result, axis=0)
print(result_stds)
result = list(zip(result_averages, result_stds))
result.insert(0, now)
results.append(result)
print(result)
print("averages-------")
print(results)
print("------------")
def get_attention_weights(data, embds):
tf.reset_default_graph()
it = 0
now = "han_100d_163b_50cx_0.0001_0.5d"
with tf.Session() as sess:
model = HierarchicalAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
# saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_dir))
# Training model
# training_op, global_step = model.optimize()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
# print("Evaluation:")
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_val()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.val_max_seq_length,
model.max_sent_length: data.val_max_sent_length
}
pred, a_word, a_sent = sess.run([predictions, model.alphas_word, model.alphas_sent], feed_dict=feed_dict)
#pred, a1, A = sess.run([predictions, model.alphas1, model.alphas2, model.alphas3, model.alphas4],
#feed_dict=feed_dict)
a_word = np.reshape(a_word, [-1, data.val_max_seq_length, data.val_max_sent_length, 1])
# filter on correct predictions
zipped = list(zip(x_val, pred['labels'], pred['predictions'], pred['probabilities'], a_word, a_sent))
# print(zipped[0:2])
selection = [list(x) for x in zipped][133]
zipped_correct = [list(x) for x in zipped if x[1]==x[2] and x[1] == 1]
# print(zipped_correct[0:2])
def get_predicted_prob(x):
return (x[3])[(x[2])]
sorted_correct = sorted(zipped_correct, key=get_predicted_prob, reverse=True)
print(sorted_correct[0:2])
#selection = sorted_correct[1]
selection_zipped_tuple = list(zip(selection[0], selection[4], selection[5]))
#selection_zipped_tuple = list(zip(selection[0], selection[4]))
selection_zipped = [list(x) for x in selection_zipped_tuple]
for s in selection_zipped:
s[0] = dp.translate_to_voc(s[0])
return selection_zipped
def visualize_attention(data):
#data = np.array(data)
data.reverse()
attention_weights_word = np.array([np.squeeze(x[1]) for x in data])
attention_weights_sent = np.array([np.squeeze(x[2]) for x in data])
#max_weight = attention_weights.max()
#attention_weights = attention_weights/max_weight # increase weights to make visualization clearer
#max_weight1 = np.array(attention_weights1.max(axis=-1))
#attention_weights1 = attention_weights1 / max_weight1[:, None] # increase weights to make visualization clearer
sentence = np.array([x[0] for x in data])
#labels = np.array(["label-{}, pred-{}, prob-{}".format(x[1], x[2], max(x[3])) for x in data])
max_idx = 0
empty_rows = 0
for i, s in enumerate(sentence):
idx = list(s).index("PAD")
attention_weights_word[i, idx:] = 0
# attention_weights3[i, idx:] = 0
# attention_weights4[i, idx:] = 0
if idx > max_idx:
max_idx = idx
if idx == 0:
empty_rows += 1
sentence = sentence[empty_rows:, 0:max_idx]
attention_weights_word = attention_weights_word[empty_rows:, 0:max_idx]
attention_weights_sent = attention_weights_sent[empty_rows:]
# attention_weights3 = attention_weights3[empty_rows:, 0:max_idx]
# attention_weights4 = attention_weights4[empty_rows:, 0:max_idx]
max_weight1 = attention_weights_word.max()
attention_weights_word = attention_weights_word / max_weight1 # increase weights to make visualization clearer
max_weight2 = attention_weights_sent.max()
attention_weights_sent = attention_weights_sent / max_weight2 # increase weights to make visualization clearer
# max_weight3 = attention_weights3.max()
# attention_weights3 = attention_weights3 / max_weight3 # increase weights to make visualization clearer
# max_weight4 = attention_weights4.max()
# attention_weights4 = attention_weights4 / max_weight4 # increase weights to make visualization clearer
#print(np.shape(attention_weights1))
print(np.shape(sentence))
#print(np.shape(labels))
MAX_FONTSIZE = 15
MIN_FONTSIZE = 10
def _font_size(word_len):
return max(int(round(MAX_FONTSIZE - 0.5 * word_len)), MIN_FONTSIZE)
def plot_attention(fname, attention_weights, attention_weights_sent, sentence):
length = np.vectorize(lambda s: len(s))
max_word_len = length(sentence).max()
font_size_max_len = _font_size(max_word_len)
plt.figure(figsize=(
attention_weights.shape[-1] * (max_word_len * font_size_max_len / 100 + 0.5), attention_weights.shape[0]))
plt.title("Attention")
plt.xlabel("words")
plt.ylabel("batch")
pc_sent = plt.pcolor(attention_weights_sent, edgecolors='k', linewidths=4, cmap='Reds', vmin=0.0, vmax=1.0)
pc_sent.update_scalarmappable()
pc = plt.pcolor(attention_weights, edgecolors='k', linewidths=4, cmap='Blues', vmin=0.0, vmax=1.0)
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
j, i = int(floor(x)), int(floor(y))
if sentence[i, j] != "PAD":
word = sentence[i, j]
else:
word = ""
fontsize = _font_size(len(word))
ax.text(x, y, word, ha="center", va="center", color=color, size=fontsize)
idx = [i + 0.5 for i in range(attention_weights_sent.shape[0])]
plt.yticks(idx, attention_weights_sent)
for l, i in zip(ax.yaxis.get_ticklabels(), pc_sent.get_facecolors()):
l.set_color(i)
l.set_backgroundcolor(i)
l.set_fontsize(15)
plt.colorbar(pc)
plt.savefig(fname)
plot_attention("attention_real_han.png", attention_weights_word, np.array([[x] for x in attention_weights_sent]), sentence)
# plot_attention("attention_real3.png", attention_weights3, sentence)
# plot_attention("attention_real4.png", attention_weights4, sentence)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def get_confusion(data, embds):
tf.reset_default_graph()
now = "banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop"
tf.reset_default_graph()
with tf.Session() as sess:
model = BucketizedAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, 0)
checkpoint_dir = "{}checkpoints".format(logdir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(model.embedding_init, feed_dict={model.embedding_placeholder: embds})
saver.restore(sess, checkpoint_dir)
predictions = model.predict()
x_val, y_val, sent_lengths_val, seq_lengths_val = data.fetch_test()
feed_dict = {model.x: x_val, model.y: y_val, model.sent_lengths: sent_lengths_val,
model.seq_lengths: seq_lengths_val, model.dropout_keep_prob: 1,
model.max_seq_length: data.test_max_seq_length,
model.max_sent_length: data.test_max_sent_length
}
pred = sess.run(predictions, feed_dict=feed_dict)
def fn(x):
if x == 0:
return 3
elif x == 1:
return 4
elif x == 2:
return 2
elif x == 3:
return 1
elif x == 4:
return 5
elif x == 5:
return 0
else:
return -1
labels = list(map(fn, pred['labels']))
predicts = list(map(fn, pred['predictions']))
cnf_matrix = metrics.confusion_matrix(labels, predicts)
# Plot non-normalized confusion matrix
plt.figure()
#classes = ["True", "Mostly-true", "Half-true", "Barely-true", "False", "Pants-on-fire"]
classes = ["True", "False"]
plot_confusion_matrix(cnf_matrix, classes=classes,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,
title='Normalized confusion matrix')
plt.show()
def t_test(data, embds):
tf.reset_default_graph()
acc_ban = []
f1_ban = []
now = "banl2norm_100d_163b_[10,10,10,10]cx_0.0001_0.5d_accstop"
for it in range(30):
tf.reset_default_graph()
with tf.Session() as sess:
lstm = BucketizedAttention(
num_classes=2,
vocab_size=embds.shape[0],
embedding_size=embds.shape[1]
)
root_logdir = "logs"
logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
acc, macro_f1, f1_0, f1_1, macro_precision, precision_0, precision_1, macro_recall, recall_0, recall_1 = evaluate(
sess, data, embds, lstm, logdir)
print(acc)
acc_ban.append(acc)
f1_ban.append(macro_f1)
tf.reset_default_graph()
acc_cnn = [0.6313328137178488, 0.6157443491816056, 0.6110678098207326, 0.6141855027279813, 0.6165237724084178, 0.627435697583788, 0.6297739672642245, 0.6102883865939205, 0.6219797349961029, 0.6157443491816056, 0.6188620420888542, 0.6087295401402962, 0.6071706936866719, 0.6118472330475448, 0.6336710833982853, 0.6243180046765393, 0.6056118472330475, 0.6180826188620421, 0.6243180046765393, 0.6180826188620421, 0.6250974279033515, 0.6180826188620421, 0.6219797349961029, 0.6056118472330475, 0.6188620420888542, 0.6235385814497272, 0.6063912704598597, 0.5962587685113017, 0.6313328137178488, 0.6149649259547935]
f1_cnn = [0.625208977558574, 0.6067531970160148, 0.6109316669026621, 0.6020553751990241, 0.6090837028412892, 0.6094950282209589, 0.6172590617767771, 0.607132008544496, 0.6080345191414308, 0.5998115849326153, 0.6085742361143607, 0.6078430656223209, 0.5935340795944845, 0.5862705332027911, 0.6173464207571212, 0.6042373835890662, 0.6010630976083375, 0.5991259035560702, 0.5946686067851712, 0.5925791031776069, 0.6052042516849045, 0.6115004325794092, 0.6152243182460431, 0.6045333820662768, 0.6009255107006212, 0.6008323601423038, 0.5949095710792511, 0.59088816113464, 0.6062203096074071, 0.6064241216914394]
# now = "han_100d_163b_50cx_0.0001_0.5d"
# for it in range(30):
# tf.reset_default_graph()
# with tf.Session() as sess:
# lstm = HierarchicalAttention(
# num_classes=2,
# vocab_size=embds.shape[0],
# embedding_size=embds.shape[1]
# )
# root_logdir = "logs"
# logdir = "{}/run-{}-{}/".format(root_logdir, now, it)
# acc, macro_f1, f1_0, f1_1, macro_precision, precision_0, precision_1, macro_recall, recall_0, recall_1 = evaluate(
# sess, data, embds, lstm, logdir)
# print(acc)
# acc_han.append(acc)
# f1_han.append(macro_f1)
print(stats.ttest_ind(acc_ban, acc_cnn, equal_var=False))
print(stats.ttest_ind(f1_ban, f1_cnn, equal_var=False)) | [
5,
6,
8,
9,
10
] |
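
The comparison at the end of t_test above is Welch's t-test: scipy.stats.ttest_ind with equal_var=False, applied to the per-run accuracy and macro-F1 lists of the two models. A minimal, self-contained sketch of that call follows; the two sample lists are placeholder values, not results from the runs above.

from scipy import stats

# Hypothetical per-run accuracies for two models (placeholder values only).
acc_model_a = [0.631, 0.624, 0.628, 0.635, 0.622]
acc_model_b = [0.615, 0.611, 0.618, 0.609, 0.614]

# Welch's t-test does not assume the two samples share a common variance.
t_stat, p_value = stats.ttest_ind(acc_model_a, acc_model_b, equal_var=False)
print('t = %.3f, p = %.4f' % (t_stat, p_value))
# A small p-value indicates the mean accuracies differ beyond run-to-run noise.
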
1,285 | 6b2f10449909d978ee294a502a376c8091af06e0 | <mask token>
class GerberCanvas:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
<mask token>
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
<mask token>
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
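
The __format_number method above re-inserts the decimal point that the Gerber %FSLA format specification strips from X/Y/I/J coordinates (the format digits are parsed into self.x_format and self.y_format). The following is a standalone numeric sketch of the same conversion, assuming a 2.5 format (two integer digits, five decimal digits); it is an illustration, not the string-based routine used by the class.

def gerber_to_decimal(raw, decimals=5):
    """Convert raw Gerber coordinate digits, e.g. the '12345' in 'X12345', to a float."""
    sign = -1.0 if raw.startswith('-') else 1.0
    digits = raw.lstrip('+-')
    return sign * int(digits) / (10 ** decimals)

print(gerber_to_decimal('12345'))    # 0.12345 in a 2.5 format
print(gerber_to_decimal('-250000'))  # -2.5
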
| <mask token>
class GerberCanvas:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
<mask token>
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
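
create_canvas and the __scale_image_* handlers above bind zooming differently per platform: X11 reports wheel motion as Button-4/Button-5 events, while Windows and macOS deliver a single <MouseWheel> event with a signed delta. A reduced, standalone sketch of that pattern is shown below; the widget names and zoom factors are illustrative.

import sys
import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, bg='white')
canvas.pack(expand=True, fill='both')

def zoom(factor):
    # Rescale every item on the canvas about the origin, as canvas.scale('all', ...) does above.
    canvas.scale('all', 0, 0, factor, factor)

if sys.platform == 'linux':
    canvas.bind('<Button-4>', lambda e: zoom(1.1))   # wheel up
    canvas.bind('<Button-5>', lambda e: zoom(0.9))   # wheel down
else:
    canvas.bind('<MouseWheel>', lambda e: zoom(1.1 if e.delta > 0 else 0.9))

# root.mainloop()  # uncomment to run interactively
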
| <mask token>
class GerberCanvas:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
all_ids = self.my_canvas.find_all()
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
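
The %AD branch of __parse_file above records each aperture definition in a dictionary keyed by its D-code name, and a bare Dnn line later selects the current aperture. Below is a stripped-down sketch of that bookkeeping, using the same string slicing as the parser; the sample line is illustrative.

aperture_definitions = {}

def define_aperture(line):
    # '%ADD10C,0.0100*%' -> name 'D10', definition 'C,0.0100'
    start = line.find(',')
    stop = line.find('*', start)
    name = line[3:start - 1]
    value = line[start - 1:stop]
    aperture_definitions[name] = value
    return name, value

print(define_aperture('%ADD10C,0.0100*%'))  # ('D10', 'C,0.0100')
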
| <mask token>
DEBUG = False
class GerberCanvas:
file_gto = False
file_gtp = False
units = 0
units_string = 'i', 'm'
"""
my canvas
"""
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {}
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.
my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.
HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set,
yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
all_ids = self.my_canvas.find_all()
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path) - 3] + 'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines(
)
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D':
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value:
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x) / 2)
elif 'C,' in value:
print(value)
self.current_aperture = self.__get_circle_diameter(
value)
elif 'O,' in value:
pass
elif 'P,' in value:
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.
units_string[GerberCanvas.units], str(y0) +
GerberCanvas.units_string[GerberCanvas.units], str(
x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.
units], outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
if self.file_gto:
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x,
',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.
start_y + 'i', self.x + 'i', self.y + 'i', width=
self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x -
radius) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], outline='black', width=self.
current_aperture)
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
break
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(
self.start_y) - cp_y, float(self.start_x) -
cp_x))
end_angle = math.degrees(math.atan2(float(self.
y) - cp_y, float(self.x) - cp_x))
try:
self.my_canvas.create_arc(str(cp_x + radius
) + GerberCanvas.units_string[
GerberCanvas.units], str(cp_y + radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_x - radius) +
GerberCanvas.units_string[GerberCanvas.
units], str(cp_y - radius) +
GerberCanvas.units_string[GerberCanvas.
units], style=tk.ARC, width=self.
current_aperture, start=start_angle,
extent=end_angle - start_angle, outline
='black')
                            except UnboundLocalError:
messagebox.showwarning('Warning',
'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x + 1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
distance = self.__distance(float(self.start_x), float(self.start_y),
float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = 1 - distance ** 2 / (2 * radius ** 2)
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start + 1:j_start]
j_temp = item[j_start + 1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start + 1:y_start]
y_temp = item[y_start + 1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start + 1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number) - 5]
if '-' in number:
return first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + 0.1
last_y = float(y1) + 0.1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i',
str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i', outline=
color, fill=color)
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= 0.1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
temp_list = self.file_commands
for item in temp_list:
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
if 'G01' in item:
self.graphics_mode = 1
if 'G03' in item:
self.direction = 270
if 'G02' in item:
self.direction = 90
if 'G74' in item:
self.quadrant_mode = 0
if 'G75' in item:
self.quadrant_mode = 1
if '%AD' in item:
name = item[3:item.find(',') - 1]
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if 'D01' in item and ('I' not in item and 'J' not in item):
self.__get_numbers(item)
self.my_canvas.create_line(self.start_x + 'i', self.start_y +
'i', self.x + 'i', self.y + 'i', width=self.
current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
if 'D01' and 'I' and 'J' in item:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode:
if self.start_x == self.x and self.start_y == self.y:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i',
str(cp_y - radius) + 'i', str(cp_x + radius) +
'i', str(cp_y + radius) + 'i', outline='black',
width=self.current_aperture)
else:
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.
start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) -
cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i',
str(cp_y + radius) + 'i', str(cp_x - radius) +
'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
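
The multi-quadrant arc branch above derives the create_arc bounding box from the I/J center offsets and the start/extent angles from math.atan2 around that center. A self-contained sketch of that geometry follows; it uses math.hypot for the radius (a slight generalization of the i-or-j choice in the parser) and ignores the canvas y-axis flip, so it is illustrative rather than a drop-in replacement.

import math

def arc_params(sx, sy, ex, ey, i_off, j_off):
    """Bounding box, start angle and angular extent for an arc from (sx, sy) to (ex, ey)."""
    cx, cy = sx + i_off, sy + j_off      # arc center from the I/J offsets
    radius = math.hypot(i_off, j_off)    # distance from the center to the start point
    start = math.degrees(math.atan2(sy - cy, sx - cx))
    end = math.degrees(math.atan2(ey - cy, ex - cx))
    bbox = (cx - radius, cy - radius, cx + radius, cy + radius)
    return bbox, start, end - start

bbox, start, extent = arc_params(1.0, 0.0, 0.0, 1.0, -1.0, 0.0)
print(bbox, start, extent)  # quarter arc about the origin: extent of 90 degrees
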
| import tkinter as tk
from pickplace import PickPlace
import sys
import math
from tkinter import messagebox
import os
DEBUG = False
class GerberCanvas:
file_gto = False
file_gtp = False
units = 0
units_string = ('i', 'm')
"""
my canvas
"""
def __init__(self, frame):
self.x_format = ''
self.y_format = ''
self.units = ''
self.quadrant_mode = 0
self.file_commands = ''
self.file_gtp_commands = ''
self.gerber_file_name = ''
self.AD_commands = {} # dict to hold aperture commands
self.current_aperture = ''
self.x = '0'
self.y = '0'
self.i = '0'
self.j = '0'
self.last_x = ''
self.last_y = ''
self.start_x = ''
self.start_y = ''
self.direction = 0
self.graphics_mode = 0
self.scaled = False
self.bounding_box_size = ()
self._canvas_frame = frame
self.create_canvas()
def create_canvas(self):
self.my_canvas = tk.Canvas(self._canvas_frame, bg='white', bd='1')
self.my_canvas.pack(expand=True, fill='both')
if sys.platform == 'linux':
self.my_canvas.bind('<Button-4>', self.__scale_image_up)
self.my_canvas.bind('<Button-5>', self.__scale_image_down)
else:
self.my_canvas.bind('<MouseWheel>', self.__scale_image)
# fixme fix the scrollbars so that they work correctly
self.y_scrollbar = tk.Scrollbar(self.my_canvas, command=self.my_canvas.yview)
self.y_scrollbar.pack(expand=True, fill='y', anchor='e')
self.x_scrollbar = tk.Scrollbar(self.my_canvas, orient=tk.HORIZONTAL, command=self.my_canvas.xview)
self.x_scrollbar.pack(fill='x', anchor='s')
# Set this only if using in Linux
if sys.platform == 'linux':
self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
self.__part_selected = 0
def load_gerber(self, path, file):
"""load gerber file
:param path: path to the file
:param file: file name to use
"""
try:
# file_path = askopenfilename(title='Open Top Silk Screen File', filetypes=[('GTO files', '*.GTO')],
# initialdir='')
all_ids = self.my_canvas.find_all()
# delete the current image if one exist.
if all_ids:
try:
for item in all_ids:
print(item)
self.my_canvas.delete(item)
except tk.TclError:
messagebox.showerror('Error', tk.TclError)
if path:
self.file_gto = True
try:
with open(os.path.join(path, file), 'r') as gerber_file:
self.file_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
# self._parse_file(gerber_file.read())
self.__parse_file(self.file_commands)
self.my_canvas.create_oval('0i', '0i', '.1i', '.1i', outline='red')
self.gerber_file_name = file
self.scaled = False
# self.bounding_box_size = self.my_canvas.bbox('all')
if DEBUG:
print('Scroll region is : ', self.bounding_box_size)
except IOError:
messagebox.showerror('File Error', 'File did not open, GTO')
finally:
self.file_gto = False
# load top pads into image
self.load_gerber_gtp(os.path.join(path, file))
self.my_canvas.config(scrollregion=self.my_canvas.bbox('all'))
# self.my_canvas.configure(xscrollcommand=self.x_scrollbar.set, yscrollcommand=self.y_scrollbar.set)
def load_gerber_gtp(self, file_path):
self.file_gtp = True
try:
print(file_path)
new_file = 'c' + file_path[1:len(file_path)-3]+'GTP'
print('final name =', new_file)
if file_path:
try:
with open(new_file, 'r') as gerber_file:
self.file_gtp_commands = gerber_file.read().splitlines()
except TypeError:
messagebox.showerror('Type Error', 'Invalid File Type')
self.__parse_file(self.file_gtp_commands)
# self.scaled = False
except IOError:
messagebox.showerror('File Error', 'File did not open, GTP')
def __parse_file(self, commands):
if DEBUG:
print(self.file_commands)
temp_list = commands
for item in temp_list:
if DEBUG:
print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
GerberCanvas.units = 0
if 'MM' in item:
GerberCanvas.units = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # define the aperture
name = item[3:item.find(',')-1]
if DEBUG:
print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start-1:stop]
if DEBUG:
print(value)
self.AD_commands[name] = value
if item[0:1] == 'D': # set the current aperture
item = item[0:item.find('*')]
if DEBUG:
print('I found a ', item)
for key, value in self.AD_commands.items():
self.current_ad_command = key
if item in key:
if 'R,' in value: # for a rectangle
print(value)
x, y = self.__get_rectangle_size(value)
self.rect_x = x
self.rect_y = y
print('Half of x is: ', float(self.rect_x)/2)
# todo send this to a function to get size
elif 'C,' in value: # for a circle
print(value)
self.current_aperture = self.__get_circle_diameter(value)
elif 'O,' in value: # for a ob-round
pass
elif 'P,' in value: # for a polygon
pass
elif 'TARGET' in value:
pass
elif 'THERMAL' in value:
pass
# This is the Flash command. Create a flash of the object.
if 'D03' in item:
if DEBUG:
print('current key is = ', self.current_ad_command)
print(self.AD_commands[self.current_ad_command])
if 'R,' in self.AD_commands[self.current_ad_command]:
if DEBUG:
print('draw a rectangle')
x0 = float(self.start_x) - float(self.rect_x) / 2
y0 = float(self.start_y) + float(self.rect_y) / 2
x1 = float(self.start_x) + float(self.rect_x) / 2
y1 = float(self.start_y) - float(self.rect_y) / 2
self.my_canvas.create_rectangle(str(x0) + GerberCanvas.units_string[GerberCanvas.units],
str(y0) + GerberCanvas.units_string[GerberCanvas.units],
str(x1) + GerberCanvas.units_string[GerberCanvas.units],
str(y1) + GerberCanvas.units_string[GerberCanvas.units],
outline='white', fill='black')
if 'C,' in self.AD_commands[self.current_ad_command]:
print('draw a circle')
# the D02 command is the move to command.
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
# if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
if ('D01' in item) and (('I' not in item) and ('J' not in item)):
if self.file_gto: # draw a line
self.__get_numbers(item)
if DEBUG:
print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x+'i', self.start_y+'i', self.x+'i', self.y+'i',
width=self.current_aperture+'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle/arc
if self.file_gto:
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item) # test
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
try:
self.my_canvas.create_oval(str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
outline='black', width=self.current_aperture)
                            except UnboundLocalError:
messagebox.showwarning('Warning', 'Something went wrong.')
break
else: # This draws arcs
# self.evaluate_arc_command(item)
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if DEBUG:
print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
else:
radius = 0.0
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
# radius = math.degrees(self.__get_extent(radius))
try:
self.my_canvas.create_arc(str(cp_x + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y + radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_x - radius) + GerberCanvas.units_string[GerberCanvas.units],
str(cp_y - radius) + GerberCanvas.units_string[GerberCanvas.units],
style=tk.ARC, width=self.current_aperture, start=start_angle,
extent=end_angle-start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
                            except UnboundLocalError:
messagebox.showwarning('Warning', 'Something went wrong.')
@staticmethod
def __get_circle_diameter(value):
return value[3:len(value)]
@staticmethod
def __get_rectangle_size(value):
print(value)
find_x = value.find('X'[0:len(value)])
width = value[2:find_x]
length = value[find_x+1:len(value)]
print(width, length)
return width, length
def __get_extent(self, radius):
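        # Law of cosines: a chord of length d on a circle of radius r subtends an angle of acos(1 - d**2 / (2 * r**2)).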
distance = self.__distance(float(self.start_x), float(self.start_y), float(self.x), float(self.y))
if DEBUG:
print('distance = ', distance)
number = (1-((distance**2) / (2*(radius**2))))
result = number - int(number)
return math.acos(result)
@staticmethod
def __distance(start_x, start_y, end_x, end_y):
"""calculate distance between two points
:param start_x
:param start_y
:param end_x
:param end_y
"""
distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)
return distance
def __set_direction(self):
if self.x == self.start_x:
if self.y < self.start_y:
self.direction = 90
else:
self.direction = 270
if self.y == self.start_y:
if self.x < self.start_x:
self.direction = 0
else:
self.direction = 180
def __get_numbers(self, item):
found = 0
if 'I' in item and 'J' in item and found == 0:
found = 1
i_start = item.find('I')
j_start = item.find('J')
d_start = item.find('D')
i_temp = item[i_start+1:j_start]
j_temp = item[j_start+1:d_start]
j_temp = str(int(j_temp) * -1)
self.i = self.__format_number(i_temp)
self.j = self.__format_number(j_temp)
if 'X' and 'Y' in item:
found = 0
if 'X' in item and 'Y' in item and found == 0:
found = 1
x_start = item.find('X')
y_start = item.find('Y')
d_start = item.find('D')
x_temp = item[x_start+1:y_start]
y_temp = item[y_start+1:d_start]
if ('I' or 'J') in y_temp:
for i in range(1, len(y_temp)):
if y_temp[i] == 'I':
y_temp = y_temp[0:i]
break
y_temp = str(int(y_temp) * -1)
self.x = self.__format_number(x_temp)
self.y = self.__format_number(y_temp)
if 'X' in item and found == 0:
found = 1
x_start = item.find('X')
d_start = item.find('D')
x_temp = item[x_start+1:d_start]
self.x = self.__format_number(x_temp)
if 'Y' in item and found == 0:
found = 1
y_start = item.find('Y')
d_start = item.find('D')
y_temp = item[y_start + 1:d_start]
# flip my y axis
y_temp = str(int(y_temp) * -1)
self.y = self.__format_number(y_temp)
def __format_number(self, number):
how_long = len(number)
if how_long <= int(self.x_format[1]):
if '-' in number:
temp = number[1:len(number)]
return '-.' + temp.zfill(int(self.x_format[1]))
else:
return '.' + number.zfill(int(self.x_format[1]))
elif how_long > int(self.x_format[1]):
last = number[-5:len(number)]
first = number[0:len(number)-5]
if '-' in number:
return first + '.' + last
# return '-' + first + '.' + last
else:
return first + '.' + last
def high_lite_part(self, x, y, layer):
x1 = self.__format_pnp(x)
y1 = self.__format_pnp(y) * -1
last_x = float(x1) + .1
last_y = float(y1) + .1
if layer == 'TopLayer':
color = 'red'
else:
color = 'blue'
self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i', str(last_y) + 'i',
outline=color, fill=color)
# elif layer == 'BottomLayer':
# self.__part_selected = self.my_canvas.create_oval(str(x1) + 'i', str(y1) + 'i', str(last_x) + 'i',
# str(last_y) + 'i', outline='blue', fill='blue')
def delete_current_highlight(self):
if self.__part_selected:
self.my_canvas.delete(self.__part_selected)
def __scale_image_up(self, event=None):
self.scale_factor = 1
self.scale_factor += .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image_down(self, event=None):
self.scale_factor = 1
self.scale_factor -= .1
self.my_canvas.scale('all', 0, 0, self.scale_factor, self.scale_factor)
if PickPlace.is_file_loaded:
PickPlace.adjust_pic_n_place(self.scale_factor)
self.scaled = True
def __scale_image(self, event=None):
if event.delta >= 120:
self.__scale_image_up()
elif event.delta <= -120:
self.__scale_image_down()
self.scaled = True
@staticmethod
def __format_pnp(number):
move1 = float(number) / 10
move2 = move1 / 10
final = move2 / 10
return final
def __parse_file_gtp(self):
# print(self.file_commands)
temp_list = self.file_commands
for item in temp_list:
# print(item)
if '%FSLA' in item:
self.x_format = item[6:8]
self.y_format = item[9:11]
if '%MO' in item:
self.units = item[3:5]
if 'IN' in item:
self.__inch = 1
self.__mm = 0
if 'MM' in item:
self.__inch = 0
self.__mm = 1
# print('units is ', self.units)
if 'G01' in item:
self.graphics_mode = 1 # sets Interpolation mode graphics state parameter to linear
if 'G03' in item:
self.direction = 270 # CounterClockWise
if 'G02' in item:
self.direction = 90 # ClockWise
if 'G74' in item:
self.quadrant_mode = 0 # single Quadrant mode
if 'G75' in item:
self.quadrant_mode = 1 # Multi quadrant mode
if '%AD' in item: # diameter of the circle
name = item[3:item.find(',') - 1]
# print(name)
start = item.find(',')
stop = item.find('*', start)
value = item[start - 1:stop]
# print(value)
self.AD_commands[name] = value[2:len(value)]
if item[0:1] == 'D':
item = item[0:item.find('*')]
# print('I found a ', item)
for key, value in self.AD_commands.items():
if item in key:
self.current_aperture = value
if 'D02' in item:
self.__get_numbers(item)
if 'X' in item and 'Y' not in item:
self.start_x = self.x
if 'Y' in item and 'X' not in item:
self.start_y = self.y
if 'X' in item and 'Y' in item:
self.start_x = self.x
self.start_y = self.y
if ('D01' in item) and (('I' not in item) and ('J' not in item)): # draw a line
self.__get_numbers(item)
# print(self.start_x, ',', self.start_y, ',', self.x, ',', self.y)
self.my_canvas.create_line(self.start_x + 'i', self.start_y + 'i', self.x + 'i', self.y + 'i',
width=self.current_aperture + 'i')
self.start_x = self.x
self.start_y = self.y
# this Draws a circle.
if 'D01' and 'I' and 'J' in item: # draw a circle
self.start_x = self.x
self.start_y = self.y
self.__get_numbers(item)
if self.quadrant_mode: # This draws circles or arcs
if (self.start_x == self.x) and (self.start_y == self.y): # This draws circles
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
if self.i != 0:
radius = float(self.i)
elif self.j != 0:
radius = float(self.j)
self.my_canvas.create_oval(str(cp_x - radius) + 'i', str(cp_y - radius) + 'i',
str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
outline='black', width=self.current_aperture)
else: # This draws arcs
cp_x = float(self.start_x) + float(self.i)
cp_y = float(self.start_y) + float(self.j)
# print(str(cp_x) + ' ' + str(cp_y))
if float(self.i) > 0:
radius = float(self.i)
elif float(self.j) > 0:
radius = float(self.j)
self.__set_direction()
start_angle = math.degrees(math.atan2(float(self.start_y) - cp_y, float(self.start_x) - cp_x))
end_angle = math.degrees(math.atan2(float(self.y) - cp_y, float(self.x) - cp_x))
ext = math.degrees(self.__get_extent(radius))
self.my_canvas.create_arc(str(cp_x + radius) + 'i', str(cp_y + radius) + 'i',
str(cp_x - radius) + 'i', str(cp_y - radius) + 'i', style=tk.ARC,
width=self.current_aperture, start=start_angle,
extent=end_angle - start_angle, outline='black')
# self.my_canvas.create_arc('0', '0', '100', '100', style='arc', start=90, extent=180,
# outline='purple')
| [
18,
19,
20,
23,
25
] |
1,286 | d56aa0f0b7c420e4021736cf8f80923121856d1c | <mask token>
class RUB:
rates = list()
def __init__(self, r):
RUB.rates.append(r)
def ls(self):
print(f'RUB: {RUB.rates}')
class INR:
rates = list()
def __init__(self, r):
INR.rates.append(r)
def ls(self):
print(f'INR: {INR.rates}')
class Factory:
def getExchange(self, currency, rates):
if currency == 'TRY':
return TRY(rates)
elif currency == 'USD':
return USD(rates)
elif currency == 'RUB':
return RUB(rates)
elif currency == 'INR':
return INR(rates)
else:
return None
<mask token>
| <mask token>
class USD:
rates = list()
def __init__(self, r):
USD.rates.append(r)
def ls(self):
print(f'USD: {USD.rates}')
class RUB:
rates = list()
def __init__(self, r):
RUB.rates.append(r)
def ls(self):
print(f'RUB: {RUB.rates}')
class INR:
rates = list()
def __init__(self, r):
INR.rates.append(r)
def ls(self):
print(f'INR: {INR.rates}')
class Factory:
def getExchange(self, currency, rates):
if currency == 'TRY':
return TRY(rates)
elif currency == 'USD':
return USD(rates)
elif currency == 'RUB':
return RUB(rates)
elif currency == 'INR':
return INR(rates)
else:
return None
<mask token>
| <mask token>
class TRY:
<mask token>
def __init__(self, r):
TRY.rates.append(r)
<mask token>
class USD:
rates = list()
def __init__(self, r):
USD.rates.append(r)
def ls(self):
print(f'USD: {USD.rates}')
class RUB:
rates = list()
def __init__(self, r):
RUB.rates.append(r)
def ls(self):
print(f'RUB: {RUB.rates}')
class INR:
rates = list()
def __init__(self, r):
INR.rates.append(r)
def ls(self):
print(f'INR: {INR.rates}')
class Factory:
def getExchange(self, currency, rates):
if currency == 'TRY':
return TRY(rates)
elif currency == 'USD':
return USD(rates)
elif currency == 'RUB':
return RUB(rates)
elif currency == 'INR':
return INR(rates)
else:
return None
<mask token>
| <mask token>
class TRY:
rates = list()
def __init__(self, r):
TRY.rates.append(r)
def ls(self):
print(f'TRY: {TRY.rates}')
class USD:
rates = list()
def __init__(self, r):
USD.rates.append(r)
def ls(self):
print(f'USD: {USD.rates}')
class RUB:
rates = list()
def __init__(self, r):
RUB.rates.append(r)
def ls(self):
print(f'RUB: {RUB.rates}')
class INR:
rates = list()
def __init__(self, r):
INR.rates.append(r)
def ls(self):
print(f'INR: {INR.rates}')
class Factory:
def getExchange(self, currency, rates):
if currency == 'TRY':
return TRY(rates)
elif currency == 'USD':
return USD(rates)
elif currency == 'RUB':
return RUB(rates)
elif currency == 'INR':
return INR(rates)
else:
return None
def main(urlAPI):
resp = requests.get(urlAPI)
if resp.ok is True:
data = resp.text
jsonData = json.loads(data)
parsedData = jsonData['rates']
factory = Factory()
for c in parsedData:
f = factory.getExchange(c, parsedData[c])
TRY.ls(f)
USD.ls(f)
RUB.ls(f)
INR.ls(f)
else:
print(resp.ok)
<mask token>
| import json
import requests
import time
class TRY():
rates = list()
def __init__(self, r):
# if(TRY.rates[-1] != r):
TRY.rates.append(r)
def ls(self):
# print("TRY: "+TRY.rates[e] for e in range(1, len(TRY.rates)))
print(f"TRY: {TRY.rates}")
class USD():
rates = list()
def __init__(self, r):
# if(USD.rates[-1] != r):
USD.rates.append(r)
def ls(self):
# print("TRY: "+TRY.rates[e] for e in range(1, len(TRY.rates)))
print(f"USD: {USD.rates}")
class RUB():
rates = list()
def __init__(self, r):
# if(RUB.rates[-1] != r):
RUB.rates.append(r)
def ls(self):
# print("TRY: "+TRY.rates[e] for e in range(1, len(TRY.rates)))
print(f"RUB: {RUB.rates}")
class INR():
rates = list()
def __init__(self, r):
# if(INR.rates[-1] != r):
INR.rates.append(r)
def ls(self):
# print("TRY: "+TRY.rates[e] for e in range(1, len(TRY.rates)))
print(f"INR: {INR.rates}")
class Factory():
def getExchange(self, currency, rates):
if currency == "TRY":
return TRY(rates)
elif currency == "USD":
            return USD(rates) # US dollar
elif currency == "RUB":
            return RUB(rates) # Russian ruble
elif currency == "INR":
            return INR(rates) # Indian rupee
else:
return None
def main(urlAPI):
resp = requests.get(urlAPI)
if(resp.ok is True):
# print(resp.ok)
data = resp.text
jsonData = json.loads(data)
parsedData = jsonData['rates']
factory = Factory()
# print(parsedData)
for c in parsedData:
f = factory.getExchange(c, parsedData[c])
TRY.ls(f)
USD.ls(f)
RUB.ls(f)
INR.ls(f)
else:
print(resp.ok)
if __name__ == '__main__':
for i in range(3):
# time.sleep(10)
main("https://api.exchangeratesapi.io/latest")
| [
10,
14,
16,
19,
22
] |
1,287 | f3789d70f784345881f705fc809c49ad4e3526bc | <mask token>
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
<mask token>
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(1 + npratio)]
self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)
]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]
)
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
| <mask token>
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(1 + npratio)]
self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)
]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]
)
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
| <mask token>
npratio = 4
MAX_SENT_LENGTH = 30
MAX_SENTS = 50
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(1 + npratio)]
self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)
]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]
)
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
| <mask token>
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from keras import backend as K
npratio = 4
MAX_SENT_LENGTH = 30
MAX_SENTS = 50
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
"""多输入:输入 user_id、 news_input"""
"""输入单个用户的 user id 和 一篇 news 的信息"""
user_id, news_input = inputs[0], inputs[1]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=
'int32') for _ in range(1 + npratio)]
self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)
]
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
clicked_news_vec = [0] * MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,
axis=1))(news) for news in clicked_news_vec], axis=1)
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,
news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,
attention_news_weight])
candidate_news_vec = [0] * (1 + npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]
)
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for
candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits
| # -*- coding: utf-8 -*-
"""
======================
@author : Zhang Xu
@time : 2021/9/8:16:29
@email : [email protected]
@content : reproducing NPA with tensorflow subclassing
======================
"""
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from keras import backend as K
npratio = 4
MAX_SENT_LENGTH = 30  # number of words in one news article
MAX_SENTS = 50  # number of news articles clicked by one user
# news encoder
# Input: user id and the content of one news article
# Output: news representation
class NewsEncoder(tf.keras.Model):
def __init__(self):
super(NewsEncoder, self).__init__(name='NewsEncoder')
        # user_id part
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        # news part
self.news_input_layer = Input()
self.news_embedding_layer = Embedding()
self.news_conv_layer = Conv1D()
self.news_dropout_layer_1 = Dropout(0.2)
self.news_dropout_layer_2 = Dropout(0.2)
        # personalized attention part
self.pa_dense_layer = Dense()
self.pa_2_1_dot_layer = Dot()
self.pa_softmax_layer = Activation('softmax')
self.pa_1_1_dot_layer = Dot()
def call(self, inputs):
        '''Multiple inputs: user_id and news_input'''
        '''Takes a single user's user id and the content of one news article'''
user_id, news_input = inputs[0], inputs[1]
# qw
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qw = self.userid_flatten_layer(x1)
# news representation
x2 = self.news_input_layer(news_input)
x2 = self.news_embedding_layer(x2)
x2 = self.news_dropout_layer_1(x2)
x2 = self.news_conv_layer(x2)
x2 = self.news_dropout_layer_2(x2)
# personalized attention
qw = self.pa_dense_layer(qw)
attention_a = self.pa_2_1_dot_layer([x2, qw])
attention_weight = self.pa_softmax_layer(attention_a)
news_rep = self.pa_1_1_dot_layer([x2, attention_weight])
return news_rep
# NPA
# Input: user id, all of that user's clicked news (N articles), and candidate news (K articles)
# Output: predictions for the K candidate news articles, giving each one's click probability
class NPA(tf.keras.Model):
def __init__(self):
super(NPA, self).__init__(name='NPA')
        # user id part
self.userid_input_layer = Input()
self.userid_embedding_layer = Embedding()
self.userid_dense_layer = Dense()
self.userid_flatten_layer = Flatten()
        # clicked news part
self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
self.clickednews_dense_layer = Dense()
self.clickednews_2_1_dot_layer = Dot((2, 1))
self.clickednews_softmax_layer = Activation('softmax')
self.clickednews_1_1_dot_layer = Dot((1, 1))
        # candidate news part
self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]
# click prediction
self.cp_dot_layer = dot()
self.cp_concatenate = concatenate()
self.cp_activation_layer = Activation('softmax')
def call(self, inputs):
user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]
# qd
x1 = self.userid_input_layer(user_id)
x1 = self.userid_embedding_layer(x1)
x1 = self.userid_dense_layer(x1)
qd = self.userid_flatten_layer(x1)
# clicked news
clicked_news_vec = [0]*MAX_SENTS
for i in range(len(clicked_news)):
xx = self.clickednews_input_layer[i](clicked_news[i])
clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x, axis=1))(news) for news in clicked_news_vec], axis=1)
        # personalized attention between qd and clicked_news_rep
news_temp_dense = self.clickednews_dense_layer(qd)
attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep, news_temp_dense])
attention_news_weight = self.clickednews_softmax_layer(attention_news)
user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep, attention_news_weight])
# candidate news
candidate_news_vec = [0]*(1+npratio)
for i in range(len(candidate_news)):
xx = self.candidatenews_input_layer[i](candidate_news[i])
candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])
# click prediction
        # dot each candidate news representation with the user representation, then softmax
logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for candidate_news in candidate_news_vec]
logits = self.cp_activation_layer(self.cp_concatenate(logits))
return logits | [
5,
6,
7,
8,
9
] |
1,288 | 4a17db6b65e1615b0d519581b3e63bc34ad16093 | <mask token>
| <mask token>
data.drop(columns=drop, inplace=True)
<mask token>
model.fit(X_train, Y_train)
<mask token>
print('With Standar Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
<mask token>
print(
f"""Cross validation is: {cross_val}
and mean: {np.mean(cross_val)}
and std:{np.std(cross_val)}"""
)
<mask token>
| <mask token>
path = (
'/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'
)
data = pd.read_excel(path)
drop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',
'encoded origin']
data.drop(columns=drop, inplace=True)
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
<mask token>
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=
0.2, random_state=13)
model = ExtraTreesRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With Standar Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
model_for_cross = ExtraTreesRegressor()
cross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=
'neg_root_mean_squared_error')
print(
f"""Cross validation is: {cross_val}
and mean: {np.mean(cross_val)}
and std:{np.std(cross_val)}"""
)
<mask token>
| <mask token>
import pandas as pd
import numpy as np
from sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor
from sklearn.metrics import r2_score as r2
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split
<mask token>
path = (
'/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'
)
data = pd.read_excel(path)
drop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders',
'encoded origin']
data.drop(columns=drop, inplace=True)
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
<mask token>
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=
0.2, random_state=13)
model = ExtraTreesRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With Standar Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
model_for_cross = ExtraTreesRegressor()
cross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring=
'neg_root_mean_squared_error')
print(
f"""Cross validation is: {cross_val}
and mean: {np.mean(cross_val)}
and std:{np.std(cross_val)}"""
)
<mask token>
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 15:36:38 2021
@author: mav24
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import QuantileTransformer, StandardScaler, PowerTransformer, MaxAbsScaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import ExtraTreesRegressor, IsolationForest, GradientBoostingRegressor
from sklearn.metrics import r2_score as r2
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import cross_val_score, KFold, GridSearchCV, train_test_split
"""
Reading the training data
"""
path = '/home/mav24/Documents/Development/Regeneration/Project/Data/training_data.xlsx'
data = pd.read_excel(path)
#data.drop(columns=['Unnamed: 0', 'diesel', 'station wagon'], inplace=True)
drop = ['Unnamed: 0', 'encoded car brand', 'station wagon', 'cylinders', 'encoded origin']
data.drop(columns=drop, inplace=True)
# Scaling the data Standar sceler
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
"""
# Outliers Detection
iso = IsolationForest(contamination=0.05)
yhat = iso.fit_predict(X_scaled)
mask = yhat != -1
X_scaled, Y = X_scaled[mask, :], Y[mask]
"""
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
#model = GradientBoostingRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With Standar Scaler')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
model_for_cross = ExtraTreesRegressor()
#model_for_cross = GradientBoostingRegressor()
cross_val = cross_val_score(model_for_cross, X_scaled, Y, cv=10, scoring='neg_root_mean_squared_error')
print(f'Cross validation is: {cross_val} \n and mean: {np.mean(cross_val)} \n and std:{np.std(cross_val)}')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
pipe = Pipeline(steps=[('scaler', StandardScaler()),
('extr', ExtraTreesRegressor(n_jobs=3))])
param_grid = {'extr__n_estimators':[100],
#'extr__criterion':['squared_error', 'mse', 'mae'],
'extr__max_depth':[None, 10, 20, 50, 100, 200, len(X_train)],
#'extr__min_samples_split':[1,2,3,5,10],
#'extr__min_samples_leaf':[1,2,3,5,10],
'extr__max_features':['auto', 'sqrt', 'log2'],
#'extr__max_leaf_nodes':[None, 1,2,3,4,5],
}
grid = GridSearchCV(pipe, param_grid, scoring='r2')
grid.fit(X_train, Y_train)
print(f'Best estimators for ExtraTreesRegressor: {grid.best_estimator_}')
print(f'Best score is: {grid.best_score_}')
"""
"""
# Scaling the data PowerTransformer
X = data.drop(columns='mpg')
Y = data['mpg']
scaler = PowerTransformer()
X_scaled = scaler.fit_transform(X)
# Splitting the training data to train and test
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.20, random_state=13)
# Training and prediction
model = ExtraTreesRegressor()
model.fit(X_train, Y_train)
pred_test = model.predict(X_test)
print('With PowerTransformer')
print(f'The R2 accuracy is: {r2(Y_test, pred_test)}')
print(f'The mean square error is: {mse(Y_test, pred_test)}')
print(f'Mean absolute error is: {mae(Y_test, pred_test)}')
"""
"""
Validate the model to unseen data
"""
#path_val = '/home/mav24/Documents/Development/Regeneration/Project/Data/vavlidation_data.xlsx'
#data_val = pd.read_excel(path_val)
| [
0,
1,
2,
3,
4
] |
1,289 | 563e534e4794aa872dcdc5319b9a1943d19f940f | <mask token>
class Cells:
<mask token>
<mask token>
<mask token>
def __init__(self, nx, ny, density=5):
self.nx = nx
self.ny = ny
self._cells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._nextCells = [[Cells.UNDEFINED for y in range(ny)] for x in
range(nx)]
self._gen = [[(0) for y in range(ny)] for x in range(nx)]
for x in range(nx):
for y in range(ny):
status = Cells.ALIVE if random.randint(0, 100
) < density else Cells.DEAD
self._cells[x][y] = status
self._gen[x][y] = status - 1
def cell(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return Cells.DEAD
return self._cells[x][y]
<mask token>
def _countAliveNeighbours(self, x, y):
aliveNeighbours = 0
neighbours = (-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (
0, 1), (1, 1)
for ix, iy in neighbours:
neighbour = self.cell(x + ix, y + iy)
if neighbour == Cells.ALIVE:
aliveNeighbours += 1
return aliveNeighbours
def survive(self):
for x in range(self.nx):
for y in range(self.ny):
aliveNeighbours = self._countAliveNeighbours(x, y)
if self._cells[x][y] == Cells.ALIVE and (aliveNeighbours <=
1 or aliveNeighbours >= 4):
self._nextCells[x][y] = Cells.DEAD
self._gen[x][y] = 0
elif self._cells[x][y] == Cells.DEAD and aliveNeighbours == 3:
self._nextCells[x][y] = Cells.ALIVE
self._gen[x][y] = 1
else:
self._nextCells[x][y] = self._cells[x][y]
self._gen[x][y] += 1
self._cells = self._nextCells[:]
| <mask token>
class Cells:
<mask token>
<mask token>
<mask token>
def __init__(self, nx, ny, density=5):
self.nx = nx
self.ny = ny
self._cells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._nextCells = [[Cells.UNDEFINED for y in range(ny)] for x in
range(nx)]
self._gen = [[(0) for y in range(ny)] for x in range(nx)]
for x in range(nx):
for y in range(ny):
status = Cells.ALIVE if random.randint(0, 100
) < density else Cells.DEAD
self._cells[x][y] = status
self._gen[x][y] = status - 1
def cell(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return Cells.DEAD
return self._cells[x][y]
def gen(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return 0
return self._gen[x][y]
def _countAliveNeighbours(self, x, y):
aliveNeighbours = 0
neighbours = (-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (
0, 1), (1, 1)
for ix, iy in neighbours:
neighbour = self.cell(x + ix, y + iy)
if neighbour == Cells.ALIVE:
aliveNeighbours += 1
return aliveNeighbours
def survive(self):
for x in range(self.nx):
for y in range(self.ny):
aliveNeighbours = self._countAliveNeighbours(x, y)
if self._cells[x][y] == Cells.ALIVE and (aliveNeighbours <=
1 or aliveNeighbours >= 4):
self._nextCells[x][y] = Cells.DEAD
self._gen[x][y] = 0
elif self._cells[x][y] == Cells.DEAD and aliveNeighbours == 3:
self._nextCells[x][y] = Cells.ALIVE
self._gen[x][y] = 1
else:
self._nextCells[x][y] = self._cells[x][y]
self._gen[x][y] += 1
self._cells = self._nextCells[:]
| <mask token>
class Cells:
UNDEFINED = 0
DEAD = 1
ALIVE = 2
def __init__(self, nx, ny, density=5):
self.nx = nx
self.ny = ny
self._cells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._nextCells = [[Cells.UNDEFINED for y in range(ny)] for x in
range(nx)]
self._gen = [[(0) for y in range(ny)] for x in range(nx)]
for x in range(nx):
for y in range(ny):
status = Cells.ALIVE if random.randint(0, 100
) < density else Cells.DEAD
self._cells[x][y] = status
self._gen[x][y] = status - 1
def cell(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return Cells.DEAD
return self._cells[x][y]
def gen(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return 0
return self._gen[x][y]
def _countAliveNeighbours(self, x, y):
aliveNeighbours = 0
neighbours = (-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (
0, 1), (1, 1)
for ix, iy in neighbours:
neighbour = self.cell(x + ix, y + iy)
if neighbour == Cells.ALIVE:
aliveNeighbours += 1
return aliveNeighbours
def survive(self):
for x in range(self.nx):
for y in range(self.ny):
aliveNeighbours = self._countAliveNeighbours(x, y)
if self._cells[x][y] == Cells.ALIVE and (aliveNeighbours <=
1 or aliveNeighbours >= 4):
self._nextCells[x][y] = Cells.DEAD
self._gen[x][y] = 0
elif self._cells[x][y] == Cells.DEAD and aliveNeighbours == 3:
self._nextCells[x][y] = Cells.ALIVE
self._gen[x][y] = 1
else:
self._nextCells[x][y] = self._cells[x][y]
self._gen[x][y] += 1
self._cells = self._nextCells[:]
| import random
import time
class Cells:
UNDEFINED = 0
DEAD = 1
ALIVE = 2
def __init__(self, nx, ny, density=5):
self.nx = nx
self.ny = ny
self._cells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._nextCells = [[Cells.UNDEFINED for y in range(ny)] for x in
range(nx)]
self._gen = [[(0) for y in range(ny)] for x in range(nx)]
for x in range(nx):
for y in range(ny):
status = Cells.ALIVE if random.randint(0, 100
) < density else Cells.DEAD
self._cells[x][y] = status
self._gen[x][y] = status - 1
def cell(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return Cells.DEAD
return self._cells[x][y]
def gen(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return 0
return self._gen[x][y]
def _countAliveNeighbours(self, x, y):
aliveNeighbours = 0
neighbours = (-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (
0, 1), (1, 1)
for ix, iy in neighbours:
neighbour = self.cell(x + ix, y + iy)
if neighbour == Cells.ALIVE:
aliveNeighbours += 1
return aliveNeighbours
def survive(self):
for x in range(self.nx):
for y in range(self.ny):
aliveNeighbours = self._countAliveNeighbours(x, y)
if self._cells[x][y] == Cells.ALIVE and (aliveNeighbours <=
1 or aliveNeighbours >= 4):
self._nextCells[x][y] = Cells.DEAD
self._gen[x][y] = 0
elif self._cells[x][y] == Cells.DEAD and aliveNeighbours == 3:
self._nextCells[x][y] = Cells.ALIVE
self._gen[x][y] = 1
else:
self._nextCells[x][y] = self._cells[x][y]
self._gen[x][y] += 1
self._cells = self._nextCells[:]
| import random
import time
class Cells:
UNDEFINED = 0
DEAD = 1
ALIVE = 2
def __init__(self, nx, ny, density = 5):
self.nx = nx
self.ny = ny
self._cells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._nextCells = [[Cells.UNDEFINED for y in range(ny)] for x in range(nx)]
self._gen = [[0 for y in range(ny)] for x in range(nx)]
for x in range(nx):
for y in range(ny):
# status = random.choice((Cells.DEAD, Cells.ALIVE))
status = Cells.ALIVE if random.randint(0, 100) < density else Cells.DEAD
self._cells[x][y] = status
self._gen[x][y] = status - 1
def cell(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return Cells.DEAD
return self._cells[x][y]
# return self._cells[x % self.nx][y % self.ny]
def gen(self, x, y):
if x < 0 or x >= self.nx or y < 0 or y >= self.ny:
return 0
return self._gen[x][y]
def _countAliveNeighbours(self, x, y):
aliveNeighbours = 0
neighbours = ((-1, -1), (0, -1), ( 1, -1),
(-1, 0), ( 1, 0),
(-1, 1), (0, 1), ( 1, 1))
for (ix, iy) in neighbours:
neighbour = self.cell(x + ix, y + iy)
if neighbour == Cells.ALIVE:
aliveNeighbours += 1
return aliveNeighbours
def survive(self):
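        # One generation step: a live cell with <= 1 or >= 4 live neighbours dies; a dead cell with exactly 3 becomes alive.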
for x in range(self.nx):
for y in range(self.ny):
aliveNeighbours = self._countAliveNeighbours(x, y)
if self._cells[x][y] == Cells.ALIVE and (aliveNeighbours <= 1 or aliveNeighbours >= 4):
self._nextCells[x][y] = Cells.DEAD
self._gen[x][y] = 0
elif self._cells[x][y] == Cells.DEAD and aliveNeighbours == 3:
self._nextCells[x][y] = Cells.ALIVE
self._gen[x][y] = 1
else:
self._nextCells[x][y] = self._cells[x][y]
self._gen[x][y] += 1
self._cells = self._nextCells[:]
| [
5,
6,
7,
8,
9
] |
1,290 | 85dfb30a380dc73f5a465c8f4be84decccfbcb59 | /Users/sterlingbutters/anaconda3/lib/python3.6/encodings/cp037.py | null | null | null | null | [
0
] |
1,291 | 6192099bdecffd9ce3576f4034567478145115a0 | <mask token>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
<mask token>
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<mask token>
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
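    # A* search: node priority = distance travelled so far (des) + Manhattan-distance heuristic to the goal.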
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<mask token>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<mask token>
| <mask token>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
<mask token>
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<mask token>
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<mask token>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<mask token>
| <mask token>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
def text_read(where, row, col):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace('\n', '')
result2 = line2.split(' ')
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<mask token>
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<mask token>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<mask token>
| <mask token>
sys.setrecursionlimit(100000)
dx = [1, 0, 0, -1]
dy = [0, 1, -1, 0]
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
def text_read(where, row, col):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace('\n', '')
result2 = line2.split(' ')
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
def bfs(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
q = queue.Queue()
q.put(start)
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
ans[1] += 1
while not q.empty():
if visit[end[0]][end[1]] == 1:
break
cur_task = q.get()
x = cur_task[0]
y = cur_task[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
q.put([nx, ny])
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def test_floor():
where = 'test1'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def fifth_floor():
where = 'fifth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
fifth_floor()
forth_floor()
third_floor()
second_floor()
first_floor()
| import queue
import copy
import heapq
import sys
sys.setrecursionlimit(100000)
dx =[1,0,0,-1]
dy=[0,1,-1,0]
class PriorityQueue:
pq=[]
elements={}
task=0
def insert(self , priority,x_val,y_val):
entry = [priority, self.task,x_val,y_val]
self.elements[self.task]=entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self,task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val , y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val , y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where , out_list,ans,row,col):
f = open( where + "_output.txt", 'w')
for i in range(1,row+1):
for j in range(1,col+1):
data ="%d " %out_list[i][j]
f.write(data)
f.write("\n")
f.write("---\n")
data2 = "length = %d\n" %ans[0]
f.write(data2)
data3 = "time = %d" %ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open("./input/" + where+".txt" , 'r')
line = f.readline()
line = line.replace("\n", "")
result = line.split(" ")
a=[int(result[0]),int(result[1]),int(result[2])]
return a
def text_read(where,row,col):
f = open("./input/"+where+".txt", 'r')
line = f.readline()
list1 = [[0 for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace("\n", "")
result2 = line2.split(" ")
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos , out_list , row , col):
for r in range(1,row+1):
for c in range(1,col+1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c] == 4:
pos.append([r,c])
return pos
def bfs(start ,end, out_list , row , col , ans):
des = [[0 for c in range(col+1)] for r in range(row+1)]
visit = [[0 for c in range(col+1)] for r in range(row+1)]
q = queue.Queue()
q.put(start)
visit[start[0]][start[1]]=1;
des[start[0]][start[1]]=0;
ans[1] +=1
while not q.empty():
if visit[end[0]][end[1]] ==1:
break
cur_task = q.get()
x=cur_task[0]
y=cur_task[1]
for k in range (4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <=row and ny >=1 and ny<=col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] =1
des[nx][ny] = des[x][y] +1
q.put([nx,ny])
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0],end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
def IDS(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
find=[0]
limit = 0
while find[0] != 1:
limit +=1
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0;
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num=limit
target = [end[0],end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start , end , out_list , row , col ,ans , limit,des,visit,find):
if visit[end[0]][end[1]] == 1:
find[0]=1
return
x=start[0]
y=start[1]
for k in range(4):
nx = x+dx[k]
ny=y+dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
if des[x][y]+1 <=limit:
visit[nx][ny]=1
des[nx][ny] = des[x][y]+1
next_start=[nx,ny]
ans[1]+=1
dfs(next_start , end , out_list , row , col , ans , limit, des , visit,find)
def astar(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1;
des[start[0]][start[1]] = 0;
pq2 = PriorityQueue()
while pq2.size() !=0:
pq2.pop()
manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])
pq2.insert(manhattan_d,start[0],start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny]=1
des[nx][ny]=des[x_val][y_val]+1
d=abs(nx-end[0])+abs(ny-end[1])+des[nx][ny]
pq2.insert(d,nx,ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
def greedy(start , end , out_list , row , col , ans):
des = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit = [[0 for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1;
des[start[0]][start[1]] = 0;
pq2 = PriorityQueue()
while pq2.size() !=0:
pq2.pop()
manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])
pq2.insert(manhattan_d,start[0],start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny]=1
des[nx][ny]=des[x_val][y_val]+1
d=abs(nx-end[0])+abs(ny-end[1])
pq2.insert(d,nx,ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num,0,-1):
tx=target[0]
ty=target[1]
out_list[tx][ty]=5
for k in range(4):
ntx=tx+dx[k]
nty=ty+dy[k]
if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:
if des[ntx][nty] == n-1:
target=[ntx,nty]
return out_list
def test_floor():
where = "test1"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where , row , col)
pos = []
pos = position_check(pos,out_list, row , col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans=[0,0]
#path1=bfs(pos[0],pos[1],deepcopy_copy1,row,col,ans)
#path2=bfs(pos[1],pos[2],deepcopy_copy2,row,col,ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0],pos[1],deepcopy_copy1,row,col,ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0],pos[1],deepcopy_copy1,row,col,ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def fifth_floor():
where = "fifth_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def forth_floor():
where = "fourth_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we skip one col... this might just work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = "third_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we skip one col... this might just work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = "second_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = "first_floor"
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
#path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)
#path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)
#path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
        for j in range(1, col + 1): # if we skip one col... this might just work lol
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
#test_floor()
fifth_floor()
forth_floor()
third_floor()
second_floor()
first_floor()
| [
12,
17,
18,
23,
25
] |
1,292 | 94d296b5a13bfa59dba5812da31707f9db9080af | <mask token>
| <mask token>
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
<mask token>
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
| <mask token>
train = list(read('train'))
test = list(read('test'))
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
| <mask token>
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print('Train size: {}'.format(len(train)))
print('Test size: {}'.format(len(test)))
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,
epochs=100000, alpha=0.001, mini_batch_size=100)
| """
Implements a Neural Network
"""
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print("Train size: {}".format(len(train)))
print("Test size: {}".format(len(test)))
# Normalization for values
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)
| [
0,
1,
2,
3,
4
] |
1,293 | 0e337ce21450e0fdb7688183d0542ebf902a9614 | <mask token>
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password):
try:
cookie_request = requests.post(URL + ADMIN + mailinglist, data={
'adminpw': password})
cookie_request.raise_for_status()
return cookie_request.cookies
except:
print(messages.error_message)
return None
def make_roster(mailinglist, cookies):
roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)
roster_soup = BeautifulSoup(roster_request.text, 'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4]
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ', '@'))
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input(
"What's the name of the mailing list you want to download?> ")
password = input('What is the list admin password?> ')
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster, 1):
print(count, '/', len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print('Saved', len(roster), 'email addresses in', os.path.
abspath(filename))
input('press enter to close')
break
<mask token>
| <mask token>
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password):
try:
cookie_request = requests.post(URL + ADMIN + mailinglist, data={
'adminpw': password})
cookie_request.raise_for_status()
return cookie_request.cookies
except:
print(messages.error_message)
return None
def make_roster(mailinglist, cookies):
roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)
roster_soup = BeautifulSoup(roster_request.text, 'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4]
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ', '@'))
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input(
"What's the name of the mailing list you want to download?> ")
password = input('What is the list admin password?> ')
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster, 1):
print(count, '/', len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print('Saved', len(roster), 'email addresses in', os.path.
abspath(filename))
input('press enter to close')
break
if __name__ == '__main__':
main()
| <mask token>
URL = 'https://mailman.kcl.ac.uk/mailman/'
ADMIN = 'admin/'
ROSTER = 'roster/'
OUTPUT_FOLDER = '../output/'
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password):
try:
cookie_request = requests.post(URL + ADMIN + mailinglist, data={
'adminpw': password})
cookie_request.raise_for_status()
return cookie_request.cookies
except:
print(messages.error_message)
return None
def make_roster(mailinglist, cookies):
roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)
roster_soup = BeautifulSoup(roster_request.text, 'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4]
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ', '@'))
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input(
"What's the name of the mailing list you want to download?> ")
password = input('What is the list admin password?> ')
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster, 1):
print(count, '/', len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print('Saved', len(roster), 'email addresses in', os.path.
abspath(filename))
input('press enter to close')
break
if __name__ == '__main__':
main()
| import messages
import os
import requests
from bs4 import BeautifulSoup
URL = 'https://mailman.kcl.ac.uk/mailman/'
ADMIN = 'admin/'
ROSTER = 'roster/'
OUTPUT_FOLDER = '../output/'
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password):
try:
cookie_request = requests.post(URL + ADMIN + mailinglist, data={
'adminpw': password})
cookie_request.raise_for_status()
return cookie_request.cookies
except:
print(messages.error_message)
return None
def make_roster(mailinglist, cookies):
roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)
roster_soup = BeautifulSoup(roster_request.text, 'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4]
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ', '@'))
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input(
"What's the name of the mailing list you want to download?> ")
password = input('What is the list admin password?> ')
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster, 1):
print(count, '/', len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print('Saved', len(roster), 'email addresses in', os.path.
abspath(filename))
input('press enter to close')
break
if __name__ == '__main__':
main()
|
import messages
import os
import requests
from bs4 import BeautifulSoup
URL = "https://mailman.kcl.ac.uk/mailman/"
ADMIN = "admin/"
ROSTER = "roster/"
OUTPUT_FOLDER = "../output/"
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password): # this opens up the admin page, enters the password, and saves the returned cookie to be passed to the next request
try:
cookie_request = requests.post(URL+ ADMIN + mailinglist, data = {'adminpw':password})
cookie_request.raise_for_status()
return cookie_request.cookies
except: # raises exception if the password is incorrect (or any other 4XX error)
print(messages.error_message)
return None
def make_roster(mailinglist, cookies): # takes the cookie from the cookie request and requests the roster
roster_request = requests.get(URL+ ROSTER + mailinglist, cookies = cookies)
roster_soup = BeautifulSoup(roster_request.text,'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4] # the last 4 links on the page are admin links
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ','@')) #the mailman list inexplicably uses a stupid ' at ' display format
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input("What's the name of the mailing list you want to download?> ")
password = input("What is the list admin password?> ")
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster,1):
print(count,"/",len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print("Saved", len(roster), "email addresses in", os.path.abspath(filename))
input("press enter to close")
break
if __name__ == '__main__':
main() | [
4,
5,
6,
7,
8
] |
1,294 | e6320bc1c344c87818a4063616db0c63b7b8be49 | <mask token>
def button_click(number):
current = e.get()
e.delete(0, END)
e.insert(0, str(current) + str(number))
def button_clear():
e.delete(0, END)
def button_add():
first_number = e.get()
global f_num
global math
math = 'addition'
f_num = int(first_number)
e.delete(0, END)
def button_equal():
second_number = e.get()
e.delete(0, END)
if math == 'addition':
e.insert(0, f_num + int(second_number))
if math == 'subtraction':
e.insert(0, f_num - int(second_number))
if math == 'multiplication':
e.insert(0, f_num * int(second_number))
if math == 'division':
e.insert(0, f_num / int(second_number))
def button_subtract():
first_number = e.get()
global f_num
global math
math = 'subtraction'
f_num = int(first_number)
e.delete(0, END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = 'multiplication'
f_num = int(first_number)
e.delete(0, END)
def button_divide():
first_number = e.get()
global f_num
global math
math = 'division'
f_num = int(first_number)
e.delete(0, END)
<mask token>
| <mask token>
global math
<mask token>
root.title('Calculator')
<mask token>
e.grid(columnspan=3)
def button_click(number):
current = e.get()
e.delete(0, END)
e.insert(0, str(current) + str(number))
def button_clear():
e.delete(0, END)
def button_add():
first_number = e.get()
global f_num
global math
math = 'addition'
f_num = int(first_number)
e.delete(0, END)
def button_equal():
second_number = e.get()
e.delete(0, END)
if math == 'addition':
e.insert(0, f_num + int(second_number))
if math == 'subtraction':
e.insert(0, f_num - int(second_number))
if math == 'multiplication':
e.insert(0, f_num * int(second_number))
if math == 'division':
e.insert(0, f_num / int(second_number))
def button_subtract():
first_number = e.get()
global f_num
global math
math = 'subtraction'
f_num = int(first_number)
e.delete(0, END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = 'multiplication'
f_num = int(first_number)
e.delete(0, END)
def button_divide():
first_number = e.get()
global f_num
global math
math = 'division'
f_num = int(first_number)
e.delete(0, END)
<mask token>
buttonClear.grid(row=1, column=0, columnspan=3)
<mask token>
root.mainloop()
| <mask token>
global math
root = Tk()
root.title('Calculator')
e = Entry(root, width=60, borderwidth=5)
e.grid(columnspan=3)
def button_click(number):
current = e.get()
e.delete(0, END)
e.insert(0, str(current) + str(number))
def button_clear():
e.delete(0, END)
def button_add():
first_number = e.get()
global f_num
global math
math = 'addition'
f_num = int(first_number)
e.delete(0, END)
def button_equal():
second_number = e.get()
e.delete(0, END)
if math == 'addition':
e.insert(0, f_num + int(second_number))
if math == 'subtraction':
e.insert(0, f_num - int(second_number))
if math == 'multiplication':
e.insert(0, f_num * int(second_number))
if math == 'division':
e.insert(0, f_num / int(second_number))
def button_subtract():
first_number = e.get()
global f_num
global math
math = 'subtraction'
f_num = int(first_number)
e.delete(0, END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = 'multiplication'
f_num = int(first_number)
e.delete(0, END)
def button_divide():
first_number = e.get()
global f_num
global math
math = 'division'
f_num = int(first_number)
e.delete(0, END)
buttonClear = Button(root, width=52, height=8, text='Clear', command=
button_clear)
buttonClear.grid(row=1, column=0, columnspan=3)
button7 = Button(root, width=16, height=8, text='7', command=lambda :
button_click(7)).grid(row=3, column=0)
button8 = Button(root, width=16, height=8, text='8', command=lambda :
button_click(8)).grid(row=3, column=1)
button9 = Button(root, width=16, height=8, text='9', command=lambda :
button_click(9)).grid(row=3, column=2)
button4 = Button(root, width=16, height=8, text='4', command=lambda :
button_click(4)).grid(row=4, column=0)
button5 = Button(root, width=16, height=8, text='5', command=lambda :
button_click(5)).grid(row=4, column=1)
button6 = Button(root, width=16, height=8, text='6', command=lambda :
button_click(6)).grid(row=4, column=2)
button1 = Button(root, width=16, height=8, text='1', command=lambda :
button_click(1)).grid(row=5, column=0)
button2 = Button(root, width=16, height=8, text='2', command=lambda :
button_click(2)).grid(row=5, column=1)
button3 = Button(root, width=16, height=8, text='3', command=lambda :
button_click(3)).grid(row=5, column=2)
button0 = Button(root, width=16, height=8, text='0', command=lambda :
button_click(0)).grid(row=6, column=0)
buttonEqual = Button(root, width=34, height=8, text='=', command=button_equal
).grid(row=6, column=1, columnspan=2)
buttonPlus = Button(root, width=16, height=8, text='+', command=button_add
).grid(row=7, column=0)
buttonSubtract = Button(root, width=16, height=8, text='-', command=
button_subtract).grid(row=7, column=1)
buttonMul = Button(root, width=16, height=8, text='*', command=button_multiply
).grid(row=7, column=2)
root.mainloop()
| from tkinter import *
global math
root = Tk()
root.title('Calculator')
e = Entry(root, width=60, borderwidth=5)
e.grid(columnspan=3)
def button_click(number):
current = e.get()
e.delete(0, END)
e.insert(0, str(current) + str(number))
def button_clear():
e.delete(0, END)
def button_add():
first_number = e.get()
global f_num
global math
math = 'addition'
f_num = int(first_number)
e.delete(0, END)
def button_equal():
second_number = e.get()
e.delete(0, END)
if math == 'addition':
e.insert(0, f_num + int(second_number))
if math == 'subtraction':
e.insert(0, f_num - int(second_number))
if math == 'multiplication':
e.insert(0, f_num * int(second_number))
if math == 'division':
e.insert(0, f_num / int(second_number))
def button_subtract():
first_number = e.get()
global f_num
global math
math = 'subtraction'
f_num = int(first_number)
e.delete(0, END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = 'multiplication'
f_num = int(first_number)
e.delete(0, END)
def button_divide():
first_number = e.get()
global f_num
global math
math = 'division'
f_num = int(first_number)
e.delete(0, END)
buttonClear = Button(root, width=52, height=8, text='Clear', command=
button_clear)
buttonClear.grid(row=1, column=0, columnspan=3)
button7 = Button(root, width=16, height=8, text='7', command=lambda :
button_click(7)).grid(row=3, column=0)
button8 = Button(root, width=16, height=8, text='8', command=lambda :
button_click(8)).grid(row=3, column=1)
button9 = Button(root, width=16, height=8, text='9', command=lambda :
button_click(9)).grid(row=3, column=2)
button4 = Button(root, width=16, height=8, text='4', command=lambda :
button_click(4)).grid(row=4, column=0)
button5 = Button(root, width=16, height=8, text='5', command=lambda :
button_click(5)).grid(row=4, column=1)
button6 = Button(root, width=16, height=8, text='6', command=lambda :
button_click(6)).grid(row=4, column=2)
button1 = Button(root, width=16, height=8, text='1', command=lambda :
button_click(1)).grid(row=5, column=0)
button2 = Button(root, width=16, height=8, text='2', command=lambda :
button_click(2)).grid(row=5, column=1)
button3 = Button(root, width=16, height=8, text='3', command=lambda :
button_click(3)).grid(row=5, column=2)
button0 = Button(root, width=16, height=8, text='0', command=lambda :
button_click(0)).grid(row=6, column=0)
buttonEqual = Button(root, width=34, height=8, text='=', command=button_equal
).grid(row=6, column=1, columnspan=2)
buttonPlus = Button(root, width=16, height=8, text='+', command=button_add
).grid(row=7, column=0)
buttonSubtract = Button(root, width=16, height=8, text='-', command=
button_subtract).grid(row=7, column=1)
buttonMul = Button(root, width=16, height=8, text='*', command=button_multiply
).grid(row=7, column=2)
root.mainloop()
| from tkinter import *
global math
root = Tk()
root.title("Calculator")
e = Entry(root,width=60,borderwidth=5)
e.grid(columnspan=3)
def button_click(number):
#e.delete(0, END)
current = e.get()
e.delete(0, END)
e.insert(0, str(current) + str(number))
def button_clear():
e.delete(0, END)
def button_add():
first_number = e.get()
global f_num
global math
math = "addition"
f_num = int(first_number)
e.delete(0, END)
def button_equal():
second_number = e.get()
e.delete(0, END)
if math == "addition":
e.insert(0, f_num + int(second_number))
if math == "subtraction":
e.insert(0, f_num - int(second_number))
if math == "multiplication":
e.insert(0, f_num * int(second_number))
if math == "division":
e.insert(0, f_num / int(second_number))
def button_subtract():
first_number = e.get()
global f_num
global math
math = "subtraction"
f_num = int(first_number)
e.delete(0, END)
def button_multiply():
first_number = e.get()
global f_num
global math
math = "multiplication"
f_num = int(first_number)
e.delete(0, END)
def button_divide():
first_number = e.get()
global f_num
global math
math = "division"
f_num = int(first_number)
e.delete(0, END)
buttonClear = Button(root,width=52,height=8,text="Clear",command=button_clear)
buttonClear.grid(row=1,column=0,columnspan=3)
button7 = Button(root,width=16,height=8,text="7",command=lambda: button_click(7)).grid(row=3,column=0)
button8 = Button(root,width=16,height=8,text="8",command=lambda: button_click(8)).grid(row=3,column=1)
button9 = Button(root,width=16,height=8,text="9",command=lambda: button_click(9)).grid(row=3,column=2)
button4 = Button(root,width=16,height=8,text="4",command=lambda: button_click(4)).grid(row=4,column=0)
button5 = Button(root,width=16,height=8,text="5",command=lambda: button_click(5)).grid(row=4,column=1)
button6 = Button(root,width=16,height=8,text="6",command=lambda: button_click(6)).grid(row=4,column=2)
button1 = Button(root,width=16,height=8,text="1",command=lambda: button_click(1)).grid(row=5,column=0)
button2 = Button(root,width=16,height=8,text="2",command=lambda: button_click(2)).grid(row=5,column=1)
button3 = Button(root,width=16,height=8,text="3",command=lambda: button_click(3)).grid(row=5,column=2)
button0 = Button(root,width=16,height=8,text="0",command=lambda: button_click(0)).grid(row=6,column=0)
buttonEqual = Button(root,width=34,height=8,text="=",command=button_equal).grid(row=6,column=1,columnspan=2)
buttonPlus = Button(root,width=16,height=8,text="+",command=button_add).grid(row=7,column=0)
buttonSubtract = Button(root,width=16,height=8,text="-",command=button_subtract).grid(row=7,column=1)
buttonMul = Button(root,width=16,height=8,text="*",command=button_multiply).grid(row=7,column=2)
root.mainloop() | [
7,
8,
9,
10,
11
] |
1,295 | 13fa650557a4a8827c9fb2e514bed178df19a32c | <mask token>
| <mask token>
def check_image(file_type):
match = re.match('image/*', file_type)
return match
<mask token>
| <mask token>
def check_image(file_type):
match = re.match('image/*', file_type)
return match
def compress_image(data):
with open(PATH.format(data['name']), 'wb+') as file:
file.write(data['binary'])
image = Image.open(PATH.format(data['name']))
new_img = image.resize((128, 128))
new_img.save(PATH.format(data['name']))
with open(PATH.format(data['name']), 'rb') as image_file:
image = image_file.read()
os.remove(PATH.format(data['name']))
return image
| <mask token>
import re
import os
from PIL import Image
from common.constant import PATH
def check_image(file_type):
match = re.match('image/*', file_type)
return match
def compress_image(data):
with open(PATH.format(data['name']), 'wb+') as file:
file.write(data['binary'])
image = Image.open(PATH.format(data['name']))
new_img = image.resize((128, 128))
new_img.save(PATH.format(data['name']))
with open(PATH.format(data['name']), 'rb') as image_file:
image = image_file.read()
os.remove(PATH.format(data['name']))
return image
| """ Image Check / Compress Image"""
import re
import os
from PIL import Image
from common.constant import PATH
def check_image(file_type):
match = re.match("image/*", file_type)
return match
def compress_image(data):
with open(PATH.format(data['name']), 'wb+') as file:
file.write(data['binary'])
image = Image.open(PATH.format(data['name']))
new_img = image.resize((128, 128))
new_img.save(PATH.format(data['name']))
with open(PATH.format(data['name']), 'rb') as image_file:
image = image_file.read()
os.remove(PATH.format(data['name']))
return image
| [
0,
1,
2,
3,
4
] |
1,296 | 927b42326ad62f5e484fd7016c42a44b93609f83 | <mask token>
| <mask token>
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
<mask token>
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
| <mask token>
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}
current_abpath = abspath(dirname(__file__)) + '/'
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + 'imgs\\'
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
| from os.path import dirname, abspath
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}
current_abpath = abspath(dirname(__file__)) + '/'
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + 'imgs\\'
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
| #!/usr/bin/python
# coding: utf-8
from os.path import dirname, abspath
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {
"legendary": ["#02CE01", # set
"#BF642F"], # legndary
"rare": ["#BBBB00"]
}
current_abpath = abspath(dirname(__file__)) + "/"
# With py2exe the dirname is INSTPATH/server/library.zip. So
# current_abpath will be INSTPATH/server/library.zip/
if current_abpath[-12:] == "library.zip/":
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + "imgs\\"
def get_item_colors():
'''
>>> get_item_colors()
'''
result = []
if not PICKITEMSP: return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS["legendary"]
return result
| [
0,
2,
3,
4,
5
] |
1,297 | 65aa761110877bd93c2d2cb3d097fa3e126f72b1 | from application.processing_data.twitter import TwitterAPIv2
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
from .twitter import TwitterAPIv2
categories={
'Noise Complaints': {
'loud',
'party',
'noisy',
'noise',
'hear',
'music',
},
'Animal Services' : {
'dog',
'cat',
'bird',
'rabbit',
'dead',
},
'Un-Sanitary conditions': {
'dirty',
'trash',
'mess',
'gross',
'litter',
},
'Water Infrastructure' : {
},
'Broken Roads' : {
}
}
def authenticate_client():
key = <key>
endpoint = 'https://textanalysishackathon.cognitiveservices.azure.com/'
ta_credential = AzureKeyCredential(key)
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint, credential=ta_credential)
return text_analytics_client
def filterNegativeTweets(tweets):
client = authenticate_client()
negative_tweets = []
documents = []
for tweet in tweets:
documents.append(tweet['text'])
response = client.analyze_sentiment(documents=documents)
twitterAPI = TwitterAPIv2()
result = [doc for doc in response if not doc.is_error]
#Iterate over the tweets and match them to the response values
for tweet in tweets:
for document in result:
#Tweet matches the document
if document.sentences[0].text in tweet['text']:
#if document is negative, save both the tweet, document, and get the keyphrases
if document.confidence_scores.negative >= 0.5:
negative_tweets.append({
'tweet': tweet,
'sentiment': document,
'key_phrases': client.extract_key_phrases(documents=[tweet['text']])[0],
'tweet_location_data' : twitterAPI.get_tweet_with_id_location(tweet['id'])
})
break
return negative_tweets
| null | null | null | null | [
0
] |
1,298 | 0ee902d59d3d01b6ec8bb4cc8d5e8aa583644397 | <mask token>
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(
miu)
return kde_analyzer
<mask token>
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.
reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)
)
return innner_product
<mask token>
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M) ** 2, axis=2)
return ot.emd2(a, b, M)
<mask token>
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
<mask token>
| <mask token>
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(
miu)
return kde_analyzer
<mask token>
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.
reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)
)
return innner_product
def grad_of_function(input_samples, network):
g_of_y = network(input_samples).sum()
gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]
return gradient
<mask token>
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M) ** 2, axis=2)
return ot.emd2(a, b, M)
<mask token>
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
<mask token>
| <mask token>
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(
miu)
return kde_analyzer
<mask token>
def second_moment_single_dist(batch_dim):
return batch_dim.pow(2).sum(dim=1).mean()
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.
reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)
)
return innner_product
def grad_of_function(input_samples, network):
g_of_y = network(input_samples).sum()
gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]
return gradient
def two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight,
idx_dist):
n_dist = dist_weight.shape[0]
f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()
for j in range(n_dist):
f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()
inner_product = inprod_average(grad_g_of_y, miu_i)
half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)
loss_gi = (f_grad_g_y - inner_product + half_moment_grad_of_g
) * dist_weight[idx_dist]
return loss_gi
<mask token>
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M) ** 2, axis=2)
return ot.emd2(a, b, M)
<mask token>
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
<mask token>
def average_nn(args, **kwargs):
averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])
tmp_data = averaged_parameters
n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)
for i in range(args.NUM_DISTRIBUTION):
model_param = io.load(args.get_nn(**kwargs) +
f'/subset_{i + 1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt'
)
assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]
tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])
tmp_data[:, -1] = PTU.torch2numpy(model_param['last_layer.weight'].
squeeze())
if i == args.NUM_DISTRIBUTION - 1:
averaged_parameters[i * n_samp_of_subset:] = tmp_data[i *
n_samp_of_subset:]
else:
averaged_parameters[i * n_samp_of_subset:(i + 1) * n_samp_of_subset
] = tmp_data[i * n_samp_of_subset:(i + 1) * n_samp_of_subset]
return averaged_parameters
<mask token>
def get_marginal_list(cfg, type_data='2block'):
if type_data == '2block':
marginal_data = g_data.marginal_data_blocks_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'circ_squa':
marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(cfg)[:, :,
:-1]
elif type_data == 'mnist0-1':
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)
elif type_data == '3digit':
marginal_data = g_data.marginal_data_3digit_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'ellipse':
marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(cfg)[:, :, :-1
]
elif type_data == 'line':
marginal_data = g_data.marginal_data_line_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'usps_mnist':
marginal_data = g_data.marginal_usps_3loop_ficnn_handle(cfg)[0][
torch.randperm(5000), :, :-1]
elif type_data == 'mnist_group':
if cfg.N_TEST == 25:
idx_digit = torch.zeros(25).long()
for idx in range(5):
idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[
idx_digit]
else:
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[torch
.randperm(25000)]
elif type_data == 'cifar':
marginal_data = g_data.marginal_cifar_handle(cfg)
elif type_data == 'gmm':
marginal_data = g_data.marginal_data_gmm_3loop_ficnn(cfg)[:, :, :-1]
return marginal_data.permute(2, 0, 1)
| from __future__ import print_function
import ot
import torch
import numpy as np
from sklearn.neighbors import KernelDensity
from torch.utils.data import Dataset
import jacinle.io as io
import optimal_transport_modules.pytorch_utils as PTU
import optimal_transport_modules.generate_data as g_data
from optimal_transport_modules.record_mean_cov import select_mean_and_cov
<mask token>
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(
miu)
return kde_analyzer
def second_moment_no_average(batch_dim):
return batch_dim.pow(2).sum(dim=1)
def second_moment_single_dist(batch_dim):
return batch_dim.pow(2).sum(dim=1).mean()
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.
reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
innner_product = torch.dot(batch_dim_1.reshape(-1), batch_dim_2.reshape(-1)
)
return innner_product
def grad_of_function(input_samples, network):
g_of_y = network(input_samples).sum()
gradient = torch.autograd.grad(g_of_y, input_samples, create_graph=True)[0]
return gradient
def two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight,
idx_dist):
n_dist = dist_weight.shape[0]
f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()
for j in range(n_dist):
f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()
inner_product = inprod_average(grad_g_of_y, miu_i)
half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)
loss_gi = (f_grad_g_y - inner_product + half_moment_grad_of_g
) * dist_weight[idx_dist]
return loss_gi
<mask token>
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M) ** 2, axis=2)
return ot.emd2(a, b, M)
def free_support_barycenter(measures_locations, measures_weights, X_init, b
=None, weights=None, numItermax=100, stopThr=1e-07, use_sinkhorn=False):
g_sinkhorn_reg = 0.1
iter_count = 0
N = len(measures_locations)
k = X_init.shape[0]
d = X_init.shape[1]
if b is None:
b = np.ones((k,)) / k
if weights is None:
weights = np.ones((N,)) / N
X = X_init
log_dict = {}
displacement_square_norm = stopThr + 1.0
while displacement_square_norm > stopThr and iter_count < numItermax:
T_sum = np.zeros((k, d))
for measure_locations_i, measure_weights_i, weight_i in zip(
measures_locations, measures_weights, weights.tolist()):
M_i = ot.dist(X, measure_locations_i)
if use_sinkhorn:
T_i = ot.bregman.sinkhorn(b, measure_weights_i, M_i,
g_sinkhorn_reg)
else:
T_i = ot.emd(b, measure_weights_i, M_i)
T_sum = T_sum + weight_i * np.reshape(1.0 / b, (-1, 1)
) * np.matmul(T_i, measure_locations_i)
displacement_square_norm = np.sum(np.square(T_sum - X))
X = T_sum
print('iteration %d, displacement_square_norm=%f\n', iter_count,
displacement_square_norm)
iter_count += 1
return X
<mask token>
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
<mask token>
def get_gmm_param(trial, cond=-1):
if cond > 0:
MEAN, COV = select_mean_and_cov(trial, range_cond=cond)
else:
MEAN, COV = select_mean_and_cov(trial)
INPUT_DIM = MEAN[0].shape[1]
OUTPUT_DIM = INPUT_DIM
NUM_DISTRIBUTION = len(MEAN)
NUM_GMM_COMPONENT = []
for i in range(NUM_DISTRIBUTION):
NUM_GMM_COMPONENT.append(MEAN[i].shape[0])
high_dim_flag = INPUT_DIM > 2
return (MEAN, COV, INPUT_DIM, OUTPUT_DIM, NUM_DISTRIBUTION,
NUM_GMM_COMPONENT, high_dim_flag)
<mask token>
def average_nn(args, **kwargs):
averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])
tmp_data = averaged_parameters
n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)
for i in range(args.NUM_DISTRIBUTION):
model_param = io.load(args.get_nn(**kwargs) +
f'/subset_{i + 1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt'
)
assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]
tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])
tmp_data[:, -1] = PTU.torch2numpy(model_param['last_layer.weight'].
squeeze())
if i == args.NUM_DISTRIBUTION - 1:
averaged_parameters[i * n_samp_of_subset:] = tmp_data[i *
n_samp_of_subset:]
else:
averaged_parameters[i * n_samp_of_subset:(i + 1) * n_samp_of_subset
] = tmp_data[i * n_samp_of_subset:(i + 1) * n_samp_of_subset]
return averaged_parameters
<mask token>
def get_marginal_list(cfg, type_data='2block'):
if type_data == '2block':
marginal_data = g_data.marginal_data_blocks_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'circ_squa':
marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(cfg)[:, :,
:-1]
elif type_data == 'mnist0-1':
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)
elif type_data == '3digit':
marginal_data = g_data.marginal_data_3digit_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'ellipse':
marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(cfg)[:, :, :-1
]
elif type_data == 'line':
marginal_data = g_data.marginal_data_line_3loop_ficnn(cfg)[:, :, :-1]
elif type_data == 'usps_mnist':
marginal_data = g_data.marginal_usps_3loop_ficnn_handle(cfg)[0][
torch.randperm(5000), :, :-1]
elif type_data == 'mnist_group':
if cfg.N_TEST == 25:
idx_digit = torch.zeros(25).long()
for idx in range(5):
idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[
idx_digit]
else:
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(cfg)[torch
.randperm(25000)]
elif type_data == 'cifar':
marginal_data = g_data.marginal_cifar_handle(cfg)
elif type_data == 'gmm':
marginal_data = g_data.marginal_data_gmm_3loop_ficnn(cfg)[:, :, :-1]
return marginal_data.permute(2, 0, 1)
| from __future__ import print_function
import ot
import torch
import numpy as np
from sklearn.neighbors import KernelDensity
from torch.utils.data import Dataset
import jacinle.io as io
import optimal_transport_modules.pytorch_utils as PTU
import optimal_transport_modules.generate_data as g_data
from optimal_transport_modules.record_mean_cov import select_mean_and_cov
'''
PyTorch type
'''
def kde_Gaussian_fitting(miu, bandwidth):
kde_analyzer = KernelDensity(
kernel='gaussian', bandwidth=bandwidth).fit(miu)
return kde_analyzer
def second_moment_no_average(batch_dim):
return batch_dim.pow(2).sum(dim=1)
def second_moment_single_dist(batch_dim):
return batch_dim.pow(2).sum(dim=1).mean()
def second_moment_all_dist(batch_dim_dist):
return batch_dim_dist.pow(2).sum(dim=1).mean(dim=0)
def inprod_average(batch_dim_1, batch_dim_2):
assert batch_dim_1.shape[0] == batch_dim_2.shape[0]
batch_size = batch_dim_1.shape[0]
inner_product_avg = torch.dot(batch_dim_1.reshape(-1),
batch_dim_2.reshape(-1)) / batch_size
return inner_product_avg
def inprod(batch_dim_1, batch_dim_2):
innner_product = torch.dot(batch_dim_1.reshape(-1),
batch_dim_2.reshape(-1))
return innner_product
def grad_of_function(input_samples, network):
g_of_y = network(input_samples).sum()
gradient = torch.autograd.grad(
g_of_y, input_samples, create_graph=True)[0]
return gradient
def two_loop_loss_in_W2(convex_f_list, grad_g_of_y, miu_i, dist_weight, idx_dist):
n_dist = dist_weight.shape[0]
#! The 2nd loss part useful for f/g parameters
f_grad_g_y = convex_f_list[idx_dist](grad_g_of_y).mean()
#! The 4th loss part useful for f/g parameters
for j in range(n_dist):
f_grad_g_y -= dist_weight[j] * convex_f_list[j](grad_g_of_y).mean()
#! The 1st loss part useful for g parameters
inner_product = inprod_average(grad_g_of_y, miu_i)
#! The 3rd loss part useful for g parameters
half_moment_grad_of_g = 0.5 * second_moment_single_dist(grad_g_of_y)
loss_gi = (f_grad_g_y - inner_product +
half_moment_grad_of_g) * dist_weight[idx_dist]
return loss_gi
'''
localized POT library
'''
def w2_distance_samples_solver(sample1_n_d, sample2_n_d):
# see here for details
# https://pythonot.github.io/all.html#ot.emd
# https://pythonot.github.io/all.html#ot.emd2
assert sample1_n_d.shape == sample2_n_d.shape
num_sample = sample1_n_d.shape[0]
a = np.ones([num_sample]) / num_sample
b = np.ones([num_sample]) / num_sample
tmp_marginal_1 = np.expand_dims(sample1_n_d, axis=0)
tmp_marginal_2 = np.expand_dims(sample2_n_d, axis=1)
M = tmp_marginal_1 - tmp_marginal_2
M = np.sum(np.abs(M)**2, axis=2)
return ot.emd2(a, b, M)
def free_support_barycenter(measures_locations, measures_weights, X_init, b=None, weights=None, numItermax=100, stopThr=1e-7, use_sinkhorn=False):
g_sinkhorn_reg = 0.1
iter_count = 0
N = len(measures_locations)
k = X_init.shape[0]
d = X_init.shape[1]
if b is None:
b = np.ones((k,)) / k
if weights is None:
weights = np.ones((N,)) / N
X = X_init
log_dict = {}
displacement_square_norm = stopThr + 1.
while (displacement_square_norm > stopThr and iter_count < numItermax):
T_sum = np.zeros((k, d))
for (measure_locations_i, measure_weights_i, weight_i) in zip(measures_locations, measures_weights, weights.tolist()):
M_i = ot.dist(X, measure_locations_i)
if use_sinkhorn:
T_i = ot.bregman.sinkhorn(
b, measure_weights_i, M_i, g_sinkhorn_reg)
else:
T_i = ot.emd(b, measure_weights_i, M_i)
T_sum = T_sum + weight_i * \
np.reshape(1. / b, (-1, 1)) * \
np.matmul(T_i, measure_locations_i)
displacement_square_norm = np.sum(np.square(T_sum - X))
X = T_sum
print('iteration %d, displacement_square_norm=%f\n',
iter_count, displacement_square_norm)
iter_count += 1
return X
'''
MNIST utils
'''
class ReshapeTransform:
def __init__(self, new_size):
self.new_size = new_size
def __call__(self, img):
return torch.reshape(img, self.new_size)
# def extract_three_number(total_data):
# idx_train = (total_data.targets == 0) + (total_data.targets ==
# 1) + (total_data.targets == 7)
# total_data.targets = total_data.targets[idx_train]
# total_data.data = total_data.data[idx_train]
# return total_data
class CustomMnistDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = data
self.target = target
self.transform = transform
def __len__(self):
assert len(self.target) == len(self.data)
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
data_idxed = self.data[idx]
target_idxed = self.target[idx].float()
# sample = {'data': data_idxed, 'target': target_idxed}
if self.transform:
data_idxed = self.transform(data_idxed)
return [data_idxed, target_idxed]
'''
Gaussian utils
'''
def get_gmm_param(trial, cond=-1):
if cond > 0:
MEAN, COV = select_mean_and_cov(trial, range_cond=cond)
else:
MEAN, COV = select_mean_and_cov(trial)
INPUT_DIM = MEAN[0].shape[1]
OUTPUT_DIM = INPUT_DIM
NUM_DISTRIBUTION = len(MEAN)
NUM_GMM_COMPONENT = []
for i in range(NUM_DISTRIBUTION):
NUM_GMM_COMPONENT.append(MEAN[i].shape[0])
high_dim_flag = INPUT_DIM > 2
return MEAN, COV, INPUT_DIM, OUTPUT_DIM, NUM_DISTRIBUTION, NUM_GMM_COMPONENT, high_dim_flag
'''
Average the 2 layer neural networks
'''
def average_nn(args, **kwargs):
averaged_parameters = np.zeros([args.N_SAMPLES, args.INPUT_DIM])
tmp_data = averaged_parameters
n_samp_of_subset = int(args.N_SAMPLES / args.NUM_DISTRIBUTION)
for i in range(args.NUM_DISTRIBUTION):
model_param = io.load(args.get_nn(**kwargs) +
f"/subset_{i+1}_samples_{args.subset_samples}/trial_26/storing_models/nn_2layer_epoch200.pt")
assert args.N_SAMPLES == model_param['layer1.weight'].shape[0]
tmp_data[:, :-1] = PTU.torch2numpy(model_param['layer1.weight'])
tmp_data[:, -
1] = PTU.torch2numpy(model_param['last_layer.weight'].squeeze())
if i == args.NUM_DISTRIBUTION - 1:
averaged_parameters[(i * n_samp_of_subset)
:] = tmp_data[(i * n_samp_of_subset):]
else:
averaged_parameters[i * n_samp_of_subset:
(i + 1) * n_samp_of_subset] = tmp_data[i * n_samp_of_subset:
(i + 1) * n_samp_of_subset]
return averaged_parameters
'''
get marginal data handle
'''
def get_marginal_list(cfg, type_data='2block'):
if type_data == '2block':
marginal_data = g_data.marginal_data_blocks_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'circ_squa':
marginal_data = g_data.marginal_data_circ_squ_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'mnist0-1':
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)
elif type_data == '3digit':
marginal_data = g_data.marginal_data_3digit_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'ellipse':
marginal_data = g_data.marginal_data_ellipse_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'line':
marginal_data = g_data.marginal_data_line_3loop_ficnn(
cfg)[:, :, :-1]
elif type_data == 'usps_mnist':
marginal_data = g_data.marginal_usps_3loop_ficnn_handle(
cfg)[0][torch.randperm(5000), :, :-1]
elif type_data == 'mnist_group':
if cfg.N_TEST == 25:
idx_digit = torch.zeros(25).long()
for idx in range(5):
idx_digit[idx * 5:(idx + 1) * 5] = 5000 * idx + torch.arange(5)
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)[idx_digit]
else:
marginal_data = g_data.marginal_mnist_3loop_ficnn_handle(
cfg)[torch.randperm(25000)]
elif type_data == 'cifar':
marginal_data = g_data.marginal_cifar_handle(cfg)
elif type_data == 'gmm':
marginal_data = g_data.marginal_data_gmm_3loop_ficnn(
cfg)[:, :, :-1]
return marginal_data.permute(2, 0, 1)
| [
12,
13,
17,
21,
22
] |
1,299 | de0d0588106ab651a8d6141a44cd9e286b0ad3a5 | <mask token>
class ClientTaskStatus(object):
<mask token>
<mask token>
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
| <mask token>
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
<mask token>
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
| <mask token>
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')
).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
tasking = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))
)
self._taskstatus['tasknewcnt'] = len(tasking)
taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=
ESqlComb.Or)))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
taskdownloading = self._sqlres.query_task(SqlConditions(
SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.
value, comb=ESqlComb.Or)))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
| <mask token>
from datetime import datetime
import time
import traceback
import pytz
from datacontract import ETaskStatus
from datacontract.clientstatus.statustask import StatusTask
from idownclient.clientdbmanager import DbManager
from idownclient.config_task import clienttaskconfig
from outputmanagement import OutputManagement
from ..clientdbmanager.sqlcondition import ESqlComb, SqlCondition, SqlConditions
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')
).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
tasking = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))
)
self._taskstatus['tasknewcnt'] = len(tasking)
taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=
ESqlComb.Or)))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
taskdownloading = self._sqlres.query_task(SqlConditions(
SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.
value, comb=ESqlComb.Or)))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
| """
Collector-side task status statistics
Looks up the data directly from the database
create by judy 2018/10/22
update by judy 2019/03/05
Changed to unified output via OutputManagement
"""
from datetime import datetime
import time
import traceback
import pytz
from datacontract import ETaskStatus
from datacontract.clientstatus.statustask import StatusTask
from idownclient.clientdbmanager import DbManager
from idownclient.config_task import clienttaskconfig
from outputmanagement import OutputManagement
from ..clientdbmanager.sqlcondition import (ESqlComb, SqlCondition,
SqlConditions)
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
        self.times = clienttaskconfig.collect_client_times  # collect every 5 seconds by default
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
        # number of tasks currently being executed
# tasking = self._sqlres.query_task('taskstatus', ETaskStatus.New.value)
tasking = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.New.value,
comb=ESqlComb.Or),
))
self._taskstatus['tasknewcnt'] = len(tasking)
# taskwaiting = self._sqlres.query_task('taskstatus', ETaskStatus.WaitForDeal.value)
taskwaiting = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.WaitForDeal.value,
comb=ESqlComb.Or),
))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
# taskdownloading = self._sqlres.query_task('taskstatus', ETaskStatus.Downloading.value)
taskdownloading = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.Downloading.value,
comb=ESqlComb.Or),
))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f"Collect taskinfo error,err:{traceback.format_exc()}")
finally:
time.sleep(self.times)
| [
2,
3,
4,
5,
6
] |