max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
tests/test_authentication.py | movermeyer/cellardoor | 0 | 4500 | import unittest
from mock import Mock
import base64
from cellardoor import errors
from cellardoor.authentication import *
from cellardoor.authentication.basic import BasicAuthIdentifier
class FooIdentifier(Identifier):
pass
class BarAuthenticator(Authenticator):
pass
class TestAuthentication(unittest.TestCase):
def test_abstract_identifier(self):
id = Identifier()
with self.assertRaises(NotImplementedError):
id.identify({})
def test_abstract_authenticator(self):
auth = Authenticator()
with self.assertRaises(NotImplementedError):
auth.authenticate({})
def test_bad_identifier(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())])
def test_bad_authenticator(self):
self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)])
def test_middleware(self):
identifier = FooIdentifier()
identifier.identify = Mock(return_value='foo')
authenticator = BarAuthenticator()
authenticator.authenticate = Mock(return_value='bar')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)])
environ = {'skidoo':23}
middleware(environ, lambda: None)
identifier.identify.assert_called_once_with(environ)
authenticator.authenticate.assert_called_once_with('foo')
self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'})
def test_middleware_skip(self):
id_one = FooIdentifier()
id_one.identify = Mock(return_value=None)
id_two = FooIdentifier()
id_two.identify = Mock(return_value='two')
id_three = FooIdentifier()
id_three.identify = Mock(return_value='three')
auth_one = BarAuthenticator()
auth_one.authenticate = Mock(return_value='one')
auth_two = BarAuthenticator()
auth_two.authenticate = Mock(return_value='two')
auth_three = BarAuthenticator()
auth_three.authenticate = Mock(return_value='three')
app = Mock(return_value=[])
middleware = AuthenticationMiddleware(
app,
pairs=[
(id_one, auth_one),
(id_two, auth_two),
(id_three, auth_three)
]
)
environ = {}
middleware(environ, lambda: None)
self.assertEquals(environ, {'cellardoor.identity':'two'})
class TestBasic(unittest.TestCase):
def test_skip_if_no_auth_header(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({})
self.assertEquals(credentials, None)
def test_skip_if_not_a_pair(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'})
self.assertEquals(credentials, None)
def test_skip_if_not_basic(self):
identifier = BasicAuthIdentifier()
credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'})
self.assertEquals(credentials, None)
def test_error_if_not_base64(self):
identifier = BasicAuthIdentifier()
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic \x000'})
def test_error_if_malformed(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foobar')
with self.assertRaises(errors.IdentificationError):
identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
def test_pass(self):
identifier = BasicAuthIdentifier()
credentials = base64.standard_b64encode('foo:bar')
identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})
        self.assertEquals(identified_credentials, {'username':'foo', 'password':'bar'})
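# Added illustration (not part of the original tests): the header format accepted by
# BasicAuthIdentifier is just "Basic " + base64("username:password"), per RFC 7617.
# Assumes Python 2 string semantics, matching the rest of this module.
def make_basic_auth_header(username, password):
    """Build an HTTP_AUTHORIZATION value like the one used in test_pass()."""
    return 'Basic %s' % base64.standard_b64encode('%s:%s' % (username, password))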
| 2.859375 | 3 |
src/styleaug/__init__.py | somritabanerjee/speedplusbaseline | 69 | 4501 | <reponame>somritabanerjee/speedplusbaseline<filename>src/styleaug/__init__.py
from .styleAugmentor import StyleAugmentor | 1.140625 | 1 |
configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py | Westlake-AI/openmixup | 10 | 4502 | _base_ = [
'../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py',
'../../../_base_/default_runtime.py',
]
# model settings
model = dict(
type='MixUpClassification',
pretrained=None,
alpha=0.2,
mix_mode="cutmix",
mix_args=dict(
attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo (use pre-trained)
automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock
fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False),
manifoldmix=dict(layer=(0, 3)),
puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out of memory
mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast training
beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),
resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock
),
backbone=dict(
type='ConvNeXt',
arch='tiny',
out_indices=(3,),
norm_cfg=dict(type='LN2d', eps=1e-6),
act_cfg=dict(type='GELU'),
drop_path_rate=0.1,
gap_before_final_norm=True,
),
head=dict(
type='ClsMixupHead', # mixup CE + label smooth
loss=dict(type='LabelSmoothLoss',
label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0),
with_avg_pool=False, # gap_before_final_norm is True
in_channels=768, num_classes=1000)
)
# interval for accumulate gradient
update_interval = 2 # total: 8 x bs256 x 2 accumulates = bs4096
# additional hooks
custom_hooks = [
dict(type='EMAHook', # EMA_W = (1 - m) * EMA_W + m * W
momentum=0.9999,
warmup='linear',
warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs.
update_interval=update_interval,
),
]
# optimizer
optimizer = dict(
type='AdamW',
    lr=4e-3,  # lr = 5e-4 * (256 * 8) * 2 accumulate / 512 = 4e-3 for total batch size 4096
weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999),
paramwise_options={
'(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
'bias': dict(weight_decay=0.),
})
# apex
use_fp16 = True
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)
# lr scheduler
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False, min_lr=1e-5,
warmup='linear',
warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs.
warmup_ratio=1e-6,
)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
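# Added sanity-check notes (not part of the original config), assuming the standard
# ImageNet-1k train split of 1,281,167 images:
#   effective batch = 8 GPUs * 256 imgs/GPU * update_interval 2 = 4096
#   iters per epoch = ceil(1,281,167 / (8 * 256)) ~= 626, which is where the EMA
#   hook's warmup_iters = 20 * 626 (a 20-epoch warmup) comes from.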
| 1.398438 | 1 |
mcstasscript/interface/reader.py | PaNOSC-ViNYL/McStasScript | 3 | 4503 | import os
from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr
class McStas_file:
"""
Reader of McStas files, can add to an existing McStasScript
instrument instance or create a corresponding McStasScript python
file.
Methods
-------
add_to_instr(Instr)
Add information from McStas file to McStasScript Instr instance
write_python_file(filename)
        Write python file named filename that reproduces the McStas instr
"""
def __init__(self, filename):
"""
Initialization of McStas_file class, needs McStas instr filename
Parameters
----------
filename (str)
Name of McStas instrument file to be read
"""
# Check filename
if not os.path.isfile(filename):
raise ValueError("Given filename, \"" + filename
+ "\" could not be found.")
self.Reader = InstrumentReader(filename)
def add_to_instr(self, Instr):
"""
Adds information from the McStas file to McStasScript instr
Parameters
----------
Instr (McStasScript McStas_instr instance)
McStas_instr instance to add instrument information to
"""
# Check Instr
if not isinstance(Instr, McStas_instr):
raise TypeError("Given object is not of type McStas_instr!")
self.Reader.add_to_instr(Instr)
def write_python_file(self, filename, **kwargs):
"""
Writes python file that reproduces McStas instrument file
Parameters
----------
filename (str)
Filename of python file to be written
"""
if "force" in kwargs:
force = kwargs["force"]
else:
force = False
        # Check whether the output filename already exists
if os.path.isfile(filename):
if force:
os.remove(filename)
else:
raise ValueError("Filename \"" + filename
+ "\" already exists, you can overwrite with "
+ "force=True")
self.Reader.generate_py_version(filename)
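# Added illustration (not part of the original module); the instrument name and
# filenames below are hypothetical, and the .instr file must exist on disk.
if __name__ == "__main__":
    mcstas_file = McStas_file("my_instrument.instr")
    instrument = McStas_instr("my_instrument")
    mcstas_file.add_to_instr(instrument)
    mcstas_file.write_python_file("my_instrument.py", force=True)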
| 3.125 | 3 |
src/regrtest.py | ucsd-progsys/csolve-bak | 0 | 4504 | <reponame>ucsd-progsys/csolve-bak<filename>src/regrtest.py
#!/usr/bin/python
# Copyright (c) 2009 The Regents of the University of California. All rights reserved.
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that the
# above copyright notice and the following two paragraphs appear in
# all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION
# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
import time, subprocess, optparse, sys, socket, os
import misc.rtest as rtest
solve = "./csolve -c".split()
null = open("/dev/null", "w")
now = (time.asctime(time.localtime(time.time()))).replace(" ","_")
logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname (), now)
argcomment = "//! run with "
def logged_sys_call(args, out=None, err=None):
print "exec: " + " ".join(args)
return subprocess.call(args, stdout=out, stderr=err)
def solve_quals(file,bare,time,quiet,flags):
if quiet: out = null
else: out = None
if time: time = ["time"]
else: time = []
hygiene_flags = [("--csolveprefix=%s" % (file)), "-o", "/dev/null"]
out = open(file + ".log", "w")
rv = logged_sys_call(time + solve + flags + hygiene_flags + [file], out)
out.close()
return rv
def run_script(file,quiet):
if quiet: out = null
else: out = None
return logged_sys_call(file, out)
def getfileargs(file):
f = open(file)
l = f.readline()
f.close()
if l.startswith(argcomment):
return l[len(argcomment):].strip().split(" ")
else:
return []
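# Added illustration (not part of the original script): a C test whose first line is
#     //! run with --merge-guards
# (a made-up flag) has getfileargs() return ["--merge-guards"], which solve_quals()
# passes straight through on the csolve command line.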
class Config (rtest.TestConfig):
def __init__ (self, dargs, testdirs, logfile, threadcount):
rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount)
self.dargs = dargs
if os.path.exists("../tests/postests/coreutils/"):
logged_sys_call(["../tests/postests/coreutils/makeCoreUtil.sh", "init"], None)
def run_test (self, file):
os.environ['CSOLVEFLAGS'] = self.dargs
if file.endswith(".c"):
fargs = getfileargs(file)
return solve_quals(file, True, False, True, fargs)
elif file.endswith(".sh"):
return run_script(file, True)
def is_test (self, file):
return (file.endswith(".sh") and os.access(file, os.X_OK)) \
or (file.endswith(".c") and not file.endswith(".csolve.save.c") and not file.endswith(".ssa.c"))
#####################################################################################
#testdirs = [("../postests", 0)]
#testdirs = [("../negtests", 1)]
#testdirs = [("../slowtests", 1)]
#DEFAULT
testdirs = [("../tests/postests", 0), ("../tests/negtests", [1, 2])]
#testdirs = [("../tests/microtests", 0)]
parser = optparse.OptionParser()
parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads")
parser.add_option("-o", "--opts", dest="opts", default="", type=str, help="additional arguments to csolve")
parser.disable_interspersed_args()
options, args = parser.parse_args()
runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount))
exit (runner.run ())
| 1.734375 | 2 |
country_capital_guesser.py | NathanMH/ComputerClub | 0 | 4505 | #! /usr/bin/env python3
#######################
"""####################
Index:
1. Imports and Readme
2. Functions
3. Main
4. Testing
####################"""
#######################
###################################################################
# 1. IMPORTS AND README
###################################################################
import easygui
import country_list_getter
###################################################################
# 2. FUNCTIONS
###################################################################
# Dictionary. It has keys (Canada, France etc...) and Values (Paris, Ottawa)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST
def ask_to_play():
return easygui.ynbox("Do you want to play a game?", "Country Guesser", ("Yes", "No"))
def ask_to_replay(correct_answers, total_questions):
score = round(((correct_answers / total_questions) * 100), 2)
if score >= 50:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/happy_puppy.jpg", ["Yes", "No"])
else:
return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/sad_puppy.jpg", ["Yes", "No"])
def main_question_box(country):
return easygui.enterbox("What is the capital of: " + country + "?", "Country Capital Guesser!!")
###################################################################
# 3. MAIN
###################################################################
def funtime():
playing = 1
correct_answers = 0
total_questions = 0
ask_to_play()
while playing:
for key, value in COUNTRIES_CAPITALS.items():
answer = main_question_box(key)
# answer = input("Name the capital of: " + key + "\n").lower()
total_questions += 1 # Short for total_questions = total_questions + 1
if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:
correct_answers += 1
print("Correct!")
else:
print("Wrong!")
# Should we keep playing?
response = input("Would you like to play again?: \n")
if response.lower() == "yes" or response == "y":
playing = 1
else:
playing = 0
#score_screen(correct_answers, total_questions)
ask_to_replay(correct_answers, total_questions)
#print("You scored " + str(correct_answers)+ "/" + str(total_questions) + " (" + str(correct_percent) + "%)")
###################################################################
# 4. TESTING
###################################################################
# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}
def test_1():
pass
# ask_to_play()
# main_question_box("Canada")
funtime()
| 3.09375 | 3 |
data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py | aframires/freesound-loop-annotator | 18 | 4506 | # Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote
def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):
collection = rekordbox_file.find('COLLECTION')
found = False
for document in collection:
if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:
found = document
break
if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):
found = document
break
return found
@click.command()
@click.argument('dataset_path')
def rekordbox_file_to_analysis_file(dataset_path):
"""
Read information from rekordbox_rhythm.xml present in dataset_path and convert it into
    analysis_rhythm_rekordbox.json to be stored in the same folder and compatible with our evaluation
framework.
"""
rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()
metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))
out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')
analysis = dict()
with click.progressbar(metadata_file.keys(), label="Converting...") as metadata_keys:
for key in metadata_keys:
entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)
if entry is not False:
tempo_entry = entry.find('TEMPO')
if tempo_entry is not None:
bpm_raw = float(tempo_entry.attrib['Bpm'])
else:
bpm_raw = 0.0
analysis[key] = {"RekBox": {
"bpm": bpm_raw,
}
}
save_to_json(out_file_path, analysis, verbose=True)
if __name__ == '__main__':
rekordbox_file_to_analysis_file()
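# Added illustration (not part of the original script): a typical invocation would be
#     python rekordbox_xml_to_analysis_rhythm_rekordbox_file.py /path/to/dataset
# where /path/to/dataset (a made-up path) contains rekordbox_rhythm.xml and
# metadata.json; the script then writes analysis_rhythm_rekordbox.json next to them.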
| 2.375 | 2 |
inventree/part.py | SergeoLacruz/inventree-python | 0 | 4507 | <filename>inventree/part.py
# -*- coding: utf-8 -*-
import logging
import re
import inventree.base
import inventree.stock
import inventree.company
import inventree.build
logger = logging.getLogger('inventree')
class PartCategory(inventree.base.InventreeObject):
""" Class representing the PartCategory database model """
URL = 'part/category'
def getParts(self, **kwargs):
return Part.list(self._api, category=self.pk, **kwargs)
def getParentCategory(self):
if self.parent:
return PartCategory(self._api, self.parent)
else:
return None
def getChildCategories(self, **kwargs):
return PartCategory.list(self._api, parent=self.pk, **kwargs)
def get_category_parameter_templates(self, fetch_parent=True):
"""
fetch_parent: enable to fetch templates for parent categories
"""
parameters_url = f'part/category/{self.pk}/parameters'
return self.list(self._api,
url=parameters_url,
fetch_parent=fetch_parent)
class Part(inventree.base.ImageMixin, inventree.base.InventreeObject):
""" Class representing the Part database model """
URL = 'part'
def getCategory(self):
""" Return the part category associated with this part """
return PartCategory(self._api, self.category)
def getTestTemplates(self):
""" Return all test templates associated with this part """
return PartTestTemplate.list(self._api, part=self.pk)
def getSupplierParts(self):
""" Return the supplier parts associated with this part """
return inventree.company.SupplierPart.list(self._api, part=self.pk)
def getBomItems(self):
""" Return the items required to make this part """
return BomItem.list(self._api, part=self.pk)
def isUsedIn(self):
""" Return a list of all the parts this part is used in """
return BomItem.list(self._api, sub_part=self.pk)
def getBuilds(self, **kwargs):
""" Return the builds associated with this part """
return inventree.build.Build.list(self._api, part=self.pk, **kwargs)
def getStockItems(self):
""" Return the stock items associated with this part """
return inventree.stock.StockItem.list(self._api, part=self.pk)
def getParameters(self):
""" Return parameters associated with this part """
return Parameter.list(self._api, part=self.pk)
def getRelated(self):
""" Return related parts associated with this part """
return PartRelated.list(self._api, part=self.pk)
def getInternalPriceList(self):
"""
Returns the InternalPrice list for this part
"""
return InternalPrice.list(self._api, part=self.pk)
def setInternalPrice(self, quantity: int, price: float):
"""
Set the internal price for this part
"""
return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price)
def getAttachments(self):
return PartAttachment.list(self._api, part=self.pk)
def uploadAttachment(self, attachment, comment=''):
"""
Upload an attachment (file) against this Part.
Args:
attachment: Either a string (filename) or a file object
comment: Attachment comment
"""
return PartAttachment.upload(
self._api,
attachment,
comment=comment,
part=self.pk
)
class PartAttachment(inventree.base.Attachment):
""" Class representing a file attachment for a Part """
URL = 'part/attachment'
REQUIRED_KWARGS = ['part']
class PartTestTemplate(inventree.base.InventreeObject):
""" Class representing a test template for a Part """
URL = 'part/test-template'
@classmethod
def generateTestKey(cls, test_name):
""" Generate a 'key' for this test """
key = test_name.strip().lower()
key = key.replace(' ', '')
# Remove any characters that cannot be used to represent a variable
key = re.sub(r'[^a-zA-Z0-9]', '', key)
return key
def getTestKey(self):
return PartTestTemplate.generateTestKey(self.test_name)
class BomItem(inventree.base.InventreeObject):
""" Class representing the BomItem database model """
URL = 'bom'
class InternalPrice(inventree.base.InventreeObject):
""" Class representing the InternalPrice model """
URL = 'part/internal-price'
@classmethod
def setInternalPrice(cls, api, part, quantity: int, price: float):
"""
Set the internal price for this part
"""
data = {
'part': part,
'quantity': quantity,
'price': price,
}
# Send the data to the server
return api.post(cls.URL, data)
class PartRelated(inventree.base.InventreeObject):
""" Class representing a relationship between parts"""
URL = 'part/related'
@classmethod
def add_related(cls, api, part1, part2):
data = {
'part_1': part1,
'part_2': part2,
}
# Send the data to the server
if api.post(cls.URL, data):
logging.info("Related OK")
ret = True
else:
logging.warning("Related failed")
ret = False
return ret
class Parameter(inventree.base.InventreeObject):
"""class representing the Parameter database model """
URL = 'part/parameter'
def getunits(self):
""" Get the dimension and units for this parameter """
return [element for element
in ParameterTemplate.list(self._api)
if element['pk'] == self._data['template']]
class ParameterTemplate(inventree.base.InventreeObject):
""" class representing the Parameter Template database model"""
URL = 'part/parameter/template'
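# Added illustration (not part of the original module): generateTestKey() strips
# whitespace, lowercases, and drops non-alphanumeric characters, so it can be
# demonstrated offline without an InvenTree server connection.
if __name__ == "__main__":
    print(PartTestTemplate.generateTestKey("Solder Joint #3"))  # -> solderjoint3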
| 2.5625 | 3 |
tests/test_web_urldispatcher.py | avstarkov/aiohttp | 0 | 4508 | import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
@pytest.mark.parametrize(
"show_index,status,prefix,data",
[pytest.param(False, 403, '/', None, id="index_forbidden"),
pytest.param(True, 200, '/',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/my_dir">my_dir/</a></li>\n'
b'<li><a href="/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_root"),
pytest.param(True, 200, '/static',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
b'<li><a href="/static/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_static")])
async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client,
show_index, status, prefix, data):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
    sure that the correct HTTP statuses are returned depending on whether the
    directory index should be shown or not.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path, 'my_file')
with open(my_file_path, 'w') as fw:
fw.write('hello')
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write('world')
app = web.Application()
# Register global static route:
app.router.add_static(prefix, tmp_dir_path, show_index=show_index)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get(prefix)
assert r.status == status
if data:
assert r.headers['Content-Type'] == "text/html; charset=utf-8"
read_ = (await r.read())
assert read_ == data
async def test_follow_symlink(tmp_dir_path, aiohttp_client):
"""
Tests the access to a symlink, in static folder
"""
data = 'hello world'
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
with open(my_file_path, 'w') as fw:
fw.write(data)
my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_symlink_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
@pytest.mark.parametrize('dir_name,filename,data', [
('', 'test file.txt', 'test text'),
('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client,
dir_name, filename, data):
"""
Checks operation of static files with spaces
"""
my_dir_path = os.path.join(tmp_dir_path, dir_name)
if dir_name:
os.mkdir(my_dir_path)
my_file_path = os.path.join(my_dir_path, filename)
with open(my_file_path, 'w') as fw:
fw.write(data)
app = web.Application()
url = os.path.join('/', dir_name, filename)
app.router.add_static('/', tmp_dir_path)
client = await aiohttp_client(app)
r = await client.get(url)
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):
"""
Tests accessing non-existing resource
    Try to access a non-existing resource and make sure that a 404 HTTP status
    is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
('/a:b', '/a:b'),
('/a@b', '/a@b'),
('/a:b', '/a%3Ab'),
])
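# Note (added for clarity): '%3A' is the percent-encoding of ':', so the last case
# checks that an escaped character in the request URL still matches the literal
# ':' in the registered route.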
async def test_url_escaping(aiohttp_client, registered_path, request_url):
"""
    Tests accessing a resource whose registered path contains special or
    percent-encoded characters.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(registered_path, handler)
client = await aiohttp_client(app)
r = await client.get(request_url)
assert r.status == 200
async def test_handler_metadata_persistence():
"""
Tests accessing metadata of a handler after registering it on the app
router.
"""
app = web.Application()
async def async_handler(request):
"""Doc"""
return web.Response()
def sync_handler(request):
"""Doc"""
return web.Response()
app.router.add_get('/async', async_handler)
with pytest.warns(DeprecationWarning):
app.router.add_get('/sync', sync_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client):
"""
Tests the unauthorized access to a folder of static file server.
    Try to list the contents of a folder served by the static file server when
    the server does not have permission to do so.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
os.mkdir(my_dir_path)
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
path = MagicMock()
path.joinpath.return_value = path
path.resolve.return_value = path
path.iterdir.return_value.__iter__.side_effect = PermissionError()
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_dir')
assert r.status == 403
async def test_access_symlink_loop(tmp_dir_path, aiohttp_client):
"""
Tests the access to a looped symlink, which could not be resolved.
"""
my_dir_path = os.path.join(tmp_dir_path, 'my_symlink')
os.symlink(my_dir_path, my_dir_path)
app = web.Application()
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink')
assert r.status == 404
async def test_access_special_resource(tmp_dir_path, aiohttp_client):
"""
Tests the access to a resource that is neither a file nor a directory.
    Checks that a 404 HTTP status is returned if a special resource (e.g. a
    named pipe or a UNIX domain socket) is accessed.
"""
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
special = MagicMock()
special.is_dir.return_value = False
special.is_file.return_value = False
path = MagicMock()
path.joinpath.side_effect = lambda p: (special if p == 'special'
else path)
path.resolve.return_value = path
special.resolve.return_value = special
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', tmp_dir_path, show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/special')
assert r.status == 404
async def test_partialy_applied_handler(aiohttp_client):
app = web.Application()
async def handler(data, request):
return web.Response(body=data)
with pytest.warns(DeprecationWarning):
app.router.add_route('GET', '/', functools.partial(handler, b'hello'))
client = await aiohttp_client(app)
r = await client.get('/')
data = (await r.read())
assert data == b'hello'
def test_system_route():
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
async def test_412_is_returned(aiohttp_client):
class MyRouter(abc.AbstractRouter):
async def resolve(self, request):
raise web.HTTPPreconditionFailed()
app = web.Application(router=MyRouter())
client = await aiohttp_client(app)
resp = await client.get('/')
assert resp.status == 412
async def test_allow_head(aiohttp_client):
"""
Test allow_head on routes.
"""
app = web.Application()
async def handler(_):
return web.Response()
app.router.add_get('/a', handler, name='a')
app.router.add_get('/b', handler, allow_head=False, name='b')
client = await aiohttp_client(app)
r = await client.get('/a')
assert r.status == 200
await r.release()
r = await client.head('/a')
assert r.status == 200
await r.release()
r = await client.get('/b')
assert r.status == 200
await r.release()
r = await client.head('/b')
assert r.status == 405
await r.release()
@pytest.mark.parametrize("path", [
'/a',
'/{a}',
])
def test_reuse_last_added_resource(path):
"""
Test that adding a route with the same name and path of the last added
resource doesn't create a new resource.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(path, handler, name="a")
app.router.add_post(path, handler, name="a")
assert len(app.router.resources()) == 1
def test_resource_raw_match():
app = web.Application()
async def handler(request):
return web.Response()
route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
async def test_add_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_view("/a", MyView)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_decorate_view(aiohttp_client):
routes = web.RouteTableDef()
@routes.view("/a")
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app = web.Application()
app.router.add_routes(routes)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_web_view(aiohttp_client):
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_routes([
web.view("/a", MyView)
])
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
| 2.5 | 2 |
R-GMM-VGAE/model_citeseer.py | nairouz/R-GAE | 26 | 4509 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>)
# @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering
# @License : MIT License
import torch
import numpy as np
import torch.nn as nn
import scipy.sparse as sp
import torch.nn.functional as F
from tqdm import tqdm
from torch.optim import Adam
from sklearn.mixture import GaussianMixture
from torch.optim.lr_scheduler import StepLR
from preprocessing import sparse_to_tuple
from sklearn.neighbors import NearestNeighbors
from sklearn import metrics
from munkres import Munkres
def random_uniform_init(input_dim, output_dim):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = torch.rand(input_dim, output_dim)*2*init_range - init_range
return nn.Parameter(initial)
def q_mat(X, centers, alpha=1.0):
X = X.detach().numpy()
centers = centers.detach().numpy()
if X.size == 0:
q = np.array([])
else:
q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha))
q = q ** ((alpha + 1.0) / 2.0)
q = np.transpose(np.transpose(q) / np.sum(q, axis=1))
return q
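# Note (added for clarity): q_mat computes DEC-style Student's t soft assignments,
# q_ij proportional to (1 + ||x_i - c_j||^2 / alpha) ** (-(alpha + 1) / 2),
# normalised so that each row of q sums to 1.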
def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2):
unconf_indices = []
conf_indices = []
q = q_mat(emb, centers_emb, alpha=1.0)
confidence1 = q.max(1)
confidence2 = np.zeros((q.shape[0],))
a = np.argsort(q, axis=1)
for i in range(q.shape[0]):
confidence1[i] = q[i,a[i,-1]]
confidence2[i] = q[i,a[i,-2]]
if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2:
unconf_indices.append(i)
else:
conf_indices.append(i)
unconf_indices = np.asarray(unconf_indices, dtype=int)
conf_indices = np.asarray(conf_indices, dtype=int)
return unconf_indices, conf_indices
class clustering_metrics():
def __init__(self, true_label, predict_label):
self.true_label = true_label
self.pred_label = predict_label
def clusteringAcc(self):
# best mapping between true_label and predict label
l1 = list(set(self.true_label))
numclass1 = len(l1)
l2 = list(set(self.pred_label))
numclass2 = len(l2)
if numclass1 != numclass2:
print('Class Not equal, Error!!!!')
return 0
cost = np.zeros((numclass1, numclass2), dtype=int)
for i, c1 in enumerate(l1):
mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
for j, c2 in enumerate(l2):
mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
cost[i][j] = len(mps_d)
# match two clustering results by Munkres algorithm
m = Munkres()
cost = cost.__neg__().tolist()
indexes = m.compute(cost)
# get the match results
new_predict = np.zeros(len(self.pred_label))
for i, c in enumerate(l1):
            # corresponding label in l2:
c2 = l2[indexes[i][1]]
# ai is the index with label==c2 in the pred_label list
ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
new_predict[ai] = c
acc = metrics.accuracy_score(self.true_label, new_predict)
f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro
def evaluationClusterModelFromLabel(self):
nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()
print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))
fh = open('recoder.txt', 'a')
fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) )
fh.write('\r\n')
fh.flush()
fh.close()
return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro
class GraphConvSparse(nn.Module):
def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs):
super(GraphConvSparse, self).__init__(**kwargs)
self.weight = random_uniform_init(input_dim, output_dim)
self.activation = activation
def forward(self, inputs, adj):
x = inputs
x = torch.mm(x,self.weight)
x = torch.mm(adj, x)
outputs = self.activation(x)
return outputs
class ReGMM_VGAE(nn.Module):
def __init__(self, **kwargs):
super(ReGMM_VGAE, self).__init__()
self.num_neurons = kwargs['num_neurons']
self.num_features = kwargs['num_features']
self.embedding_size = kwargs['embedding_size']
self.nClusters = kwargs['nClusters']
# VGAE training parameters
self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons)
self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)
# GMM training parameters
self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True)
self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)
def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset):
opti = Adam(self.parameters(), lr=lr)
epoch_bar = tqdm(range(epochs))
gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag')
for _ in epoch_bar:
opti.zero_grad()
_,_, z = self.encode(features, adj)
x_ = self.decode(z)
loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor)
loss.backward()
opti.step()
gmm.fit_predict(z.detach().numpy())
self.pi.data = torch.from_numpy(gmm.weights_)
self.mu_c.data = torch.from_numpy(gmm.means_)
self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_))
self.logstd = self.mean
def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1):
pi = self.pi
mu_c = self.mu_c
log_sigma2_c = self.log_sigma2_c
det = 1e-2
Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor)
Loss = Loss * features.size(0)
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det
yita_c = yita_c / (yita_c.sum(1).view(-1,1))
KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+
torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+
(z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1))
Loss1 = KL1
KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1))
Loss1 -= KL2
return Loss, Loss1, Loss+Loss1
def generate_centers(self, emb_unconf):
y_pred = self.predict(emb_unconf)
nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy())
_, indices = nn.kneighbors(self.mu_c.detach().numpy())
return indices[y_pred]
def update_graph(self, adj, labels, emb, unconf_indices, conf_indices):
k = 0
y_pred = self.predict(emb)
emb_unconf = emb[unconf_indices]
adj = adj.tolil()
idx = unconf_indices[self.generate_centers(emb_unconf)]
for i, k in enumerate(unconf_indices):
adj_k = adj[k].tocsr().indices
if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) :
adj[k, idx[i]] = 1
for j in adj_k:
if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]):
adj[k, j] = 0
adj = adj.tocsr()
adj_label = adj + sp.eye(adj.shape[0])
adj_label = sparse_to_tuple(adj_label)
adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T),
torch.FloatTensor(adj_label[1]),
torch.Size(adj_label[2]))
weight_mask = adj_label.to_dense().view(-1) == 1
weight_tensor = torch.ones(weight_mask.size(0))
pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
weight_tensor[weight_mask] = pos_weight_orig
return adj, adj_label, weight_tensor
def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset):
self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk'))
opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089)
lr_s = StepLR(opti, step_size=10, gamma=0.9)
import os, csv
epoch_bar = tqdm(range(epochs))
previous_unconflicted = []
previous_conflicted = []
epoch_stable = 0
for epoch in epoch_bar:
opti.zero_grad()
z_mu, z_sigma2_log, emb = self.encode(features, adj_norm)
x_ = self.decode(emb)
unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2)
if epoch == 0:
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
if len(previous_unconflicted) < len(unconflicted_ind) :
z_mu = z_mu[unconflicted_ind]
z_sigma2_log = z_sigma2_log[unconflicted_ind]
emb_unconf = emb[unconflicted_ind]
emb_conf = emb[conflicted_ind]
previous_conflicted = conflicted_ind
previous_unconflicted = unconflicted_ind
else :
epoch_stable += 1
z_mu = z_mu[previous_unconflicted]
z_sigma2_log = z_sigma2_log[previous_unconflicted]
emb_unconf = emb[previous_unconflicted]
emb_conf = emb[previous_conflicted]
if epoch_stable >= 15:
epoch_stable = 0
beta1 = beta1 * 0.96
beta2 = beta2 * 0.98
if epoch % 50 == 0 and epoch <= 200 :
adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)
loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf)
epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy()))
y_pred = self.predict(emb)
cm = clustering_metrics(y, y_pred)
acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel()
elbo_loss.backward()
opti.step()
lr_s.step()
def gaussian_pdfs_log(self,x,mus,log_sigma2s):
G=[]
for c in range(self.nClusters):
G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1))
return torch.cat(G,1)
def gaussian_pdf_log(self,x,mu,log_sigma2):
c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1)
return c
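# Note (added for clarity): gaussian_pdf_log returns the log-density of a diagonal
# Gaussian, -0.5 * sum_d(log(2*pi) + log_sigma2_d + (x_d - mu_d)^2 / sigma2_d).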
def predict(self, z):
pi = self.pi
log_sigma2_c = self.log_sigma2_c
mu_c = self.mu_c
det = 1e-2
yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det
yita = yita_c.detach().numpy()
return np.argmax(yita, axis=1)
def encode(self, x_features, adj):
hidden = self.base_gcn(x_features, adj)
self.mean = self.gcn_mean(hidden, adj)
self.logstd = self.gcn_logstddev(hidden, adj)
gaussian_noise = torch.randn(x_features.size(0), self.embedding_size)
sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean
return self.mean, self.logstd ,sampled_z
@staticmethod
def decode(z):
A_pred = torch.sigmoid(torch.matmul(z,z.t()))
return A_pred | 2.1875 | 2 |
odoo-13.0/addons/stock_account/models/account_chart_template.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 4510 | <filename>odoo-13.0/addons/stock_account/models/account_chart_template.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
import logging
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
_inherit = "account.chart.template"
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}]
return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add)
def generate_properties(self, acc_template_ref, company, property_list=None):
res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company)
PropertyObj = self.env['ir.property'] # Property Stock Journal
value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1)
if value:
field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1)
vals = {
'name': 'property_stock_journal',
'company_id': company.id,
'fields_id': field.id,
'value': 'account.journal,%s' % value.id,
}
properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)])
if properties:
# the property exist: modify it
properties.write(vals)
else:
# create the property
PropertyObj.create(vals)
todo_list = [ # Property Stock Accounts
'property_stock_account_input_categ_id',
'property_stock_account_output_categ_id',
'property_stock_valuation_account_id',
]
for record in todo_list:
account = getattr(self, record)
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1)
vals = {
'name': record,
'company_id': company.id,
'fields_id': field.id,
'value': value,
}
properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1)
if not properties:
# create the property
PropertyObj.create(vals)
elif not properties.value_reference:
# update the property if False
properties.write(vals)
return res
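# Note (added for clarity): ir.property stores reference values as "<model>,<id>"
# strings, e.g. 'account.journal,7' (the id 7 is only an illustration), which is
# why the journal and account ids are formatted into the 'value' fields above.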
| 2 | 2 |
lib/roi_data/loader.py | BarneyQiao/pcl.pytorch | 233 | 4511 | import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db, self._num_classes)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data'] = blobs['data'].squeeze(axis=0)
return blobs
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
    Note: this only works if 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
    and 2) cfg.TRAIN.SCALES contains a SINGLE scale.
    Since all prepared images will have the same min side length of cfg.TRAIN.SCALES[0], we can
    pad and batch images based on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
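# Added illustration (not part of the original module): with cfg.TRAIN.IMS_PER_BATCH = 2
# and ratio_list = [0.5, 0.8, 1.2, 1.5], the returned ratios are [0.5, 0.5, 1.5, 1.5]:
# the first pair keeps the leftmost (smallest) ratio because both are < 1, and the
# second pair keeps the rightmost (largest) ratio because its left member is > 1.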
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
def __iter__(self):
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
# So we keep roidb in the type of "list of ndarray".
lists = []
for blobs in list_of_blobs:
lists.append({'data' : blobs.pop('data'),
'rois' : blobs.pop('rois'),
'labels' : blobs.pop('labels')})
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
minibatch = default_collate(mini_list)
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
| 1.921875 | 2 |
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py | andywu113/fuhe_predict | 3 | 4512 | <reponame>andywu113/fuhe_predict<gh_stars>1-10
import warnings
from distutils.version import LooseVersion
import numpy as np
import pytest
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from io import StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', verbose=10)
sys.stdout = old_stdout
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
finally:
sys.stdout = old_stdout
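# Note (added for clarity): the loops above and below check the defining LAR
# property that after step i exactly i + 1 features are tied at the maximal
# absolute correlation with the residual (within the eps tolerance), until the
# active set saturates at X.shape[1] features.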
def test_simple_precomputed():
# The same, with precomputed Gram matrix
_, _, coef_path_ = linear_model.lars_path(
X, y, Gram=G, method='lar')
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
def _assert_same_lars_path_result(output1, output2):
assert_equal(len(output1), len(output2))
for o1, o2 in zip(output1, output2):
assert_allclose(o1, o2)
@pytest.mark.parametrize('method', ['lar', 'lasso'])
@pytest.mark.parametrize('return_path', [True, False])
def test_lars_path_gram_equivalent(method, return_path):
_assert_same_lars_path_result(
linear_model.lars_path_gram(
Xy=Xy, Gram=G, n_samples=n_samples, method=method,
return_path=return_path),
linear_model.lars_path(
X, y, Gram=G, method=method,
return_path=return_path))
def test_x_none_gram_none_raises_value_error():
# Test that lars_path with no X and Gram raises exception
Xy = np.dot(X.T, y)
assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,
Xy=Xy)
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy,
method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
@pytest.mark.filterwarnings('ignore: `rcond` parameter will change')
# numpy deprecation
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * X # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
# Avoid FutureWarning about default value change when numpy >= 1.14
rcond = None if LooseVersion(np.__version__) >= '1.14' else -1
coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
# numpy deprecation
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
_, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert not np.isnan(coef_path_).any()
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar')
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', Gram=G)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', Gram=G, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
@pytest.mark.parametrize(
'classifier',
[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])
def test_lars_precompute(classifier):
# Check for different values of precompute
G = np.dot(X.T, X)
clf = classifier(precompute=G)
output_1 = ignore_warnings(clf.fit)(X, y).coef_
for precompute in [True, False, 'auto', None]:
clf = classifier(precompute=precompute)
output_2 = clf.fit(X, y).coef_
assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
_, _, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in (
[[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]]
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd():
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert np.all(np.diff(lasso.alphas_) < 0)
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert np.all(np.isfinite(clf.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
Y = np.vstack([y, y ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert not hasattr(lars_cv, 'n_nonzero_coefs')
@pytest.mark.filterwarnings('ignore::FutureWarning')
def test_lars_cv_max_iter():
with warnings.catch_warnings(record=True) as w:
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = diabetes.data
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5)
lars_cv.fit(X, y)
assert len(w) == 0
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
# Once deprecation of LAR + positive option is done use these:
# assert_raises(ValueError, linear_model.lars_path, diabetes['data'],
# diabetes['target'], method='lar', positive=True)
with pytest.warns(DeprecationWarning, match='broken'):
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method='lar',
positive=True)
method = 'lasso'
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=False)
assert coefs.min() < 0
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=True)
assert coefs.min() >= 0
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_estimatorclasses_positive_constraint():
    # test that the positive option is correctly passed through to all
    # estimator classes in this same function
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(X, y)
assert estimator.coef_.min() < 0
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(X, y)
assert min(estimator.coef_) >= 0
def test_lasso_lars_vs_lasso_cd_positive():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the above with the additional positive
    # option. However, for the middle part (the comparison of coefficient values
    # for a range of alphas), we had to make adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_copyX_behaviour(copy_X):
"""
Test that user input regarding copy_X is not being overridden (it was until
at least version 0.21)
"""
lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y)
assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_fit_copyX_behaviour(copy_X):
"""
Test that user input to .fit for copy_X overrides default __init__ value
"""
lasso_lars = LassoLarsIC(precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y, copy_X=copy_X)
assert copy_X == np.array_equal(X, X_copy)
| 2.34375 | 2 |
parser.py | FeroxTL/pynginxconfig-new | 8 | 4513 | <filename>parser.py
#coding: utf8
import copy
import re
from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location
def parse(s, parent_block):
config = copy.copy(s)
pos, brackets_level, param_start = 0, 0, 0
while pos < len(config):
if config[pos] == '#' and brackets_level == 0:
re_sharp_comment = re.search('(?P<offset>[\s\n]*)#(?P<comment>.*)$', config, re.M)
sharp_comment = re_sharp_comment.groupdict()
parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment']))
config = config[re_sharp_comment.end():]
pos, param_start = 0, 0
continue
if config[pos] == ';' and brackets_level == 0:
re_option = re.search('\s*(?P<param_name>\w+)\s*(?P<param_options>.*?);', config[param_start:], re.S)
if not re_option:
raise Exception('Wrong option')
option = re_option.groupdict()
parent_block[option['param_name']] = KeyValueOption(re.sub('[ \n]+', ' ', option['param_options']))
config = config[re_option.end():]
pos, param_start = 0, 0
continue
if config[pos] == '{':
brackets_level += 1
elif config[pos] == '}':
brackets_level -= 1
if brackets_level == 0 and param_start is not None:
re_block = re.search(
'(?P<param_name>\w+)\s*(?P<param_options>.*)\s*{(\n){0,1}(?P<block>(.|\n)*)}',
config[param_start:pos + 1],
)
block = re_block.groupdict()
if block['param_name'].lower() == 'location':
new_block = Location(block['param_options'])
parent_block.add_location(new_block)
else:
new_block = Block()
parent_block[block['param_name']] = new_block
if block['block']:
parse(block['block'], new_block)
config = config[re_block.end():]
pos, param_start = 0, 0
continue
pos += 1
if brackets_level != 0:
raise Exception('Not closed bracket')
qwe = EmptyBlock()
parse("""#{ asd #qweqeqwe{}
servername qweqweqweqweqwe; # comment {lalalal} #1
server {
listen
8080
tls;
root /data/up1;
location / {
l200;
}
location /qwe{
s 500;
}#123
}#qweqwe""", qwe)
print(qwe.render())
qwe = EmptyBlock()
parse(""" servername wqeqweqwe;
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
# server_tokens off;
# server_names_hash_bucket_size 64;
# server_name_in_redirect off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# Logging Settings
##
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
##
# Gzip Settings
##
gzip on;
gzip_disable "msie6";
}#123123
""", qwe)
print(qwe.render())
| 2.984375 | 3 |
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py | triompha/EarthWarrior3D | 0 | 4514 | import os
import subprocess
import sys
print 'Build Config:'
print ' Host:win7 x86'
print ' Branch:develop'
print ' Target:win32'
print ' "%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"'
if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False):
node_name = os.environ['NODE_NAME']
source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name
source_dir = source_dir.replace("/", os.sep)
os.system("xcopy " + source_dir + " . /E /Y /H")
os.system('git pull origin develop')
os.system('git submodule update --init --force')
ret = subprocess.call('"%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"', shell=True)
os.system('git clean -xdf -f')
print 'build exit'
print ret
if ret == 0:
exit(0)
else:
exit(1)
| 2.03125 | 2 |
iris_sdk/models/data/ord/rate_center_search_order.py | NumberAI/python-bandwidth-iris | 2 | 4515 | <filename>iris_sdk/models/data/ord/rate_center_search_order.py
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.ord.rate_center_search_order import \
RateCenterSearchOrderMap
class RateCenterSearchOrder(RateCenterSearchOrderMap, BaseData):
pass | 1.5625 | 2 |
optimizer.py | thanusha22/CEC-1 | 0 | 4516 | <reponame>thanusha22/CEC-1
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.HYBRID as hybrid
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot
warnings.simplefilter(action="ignore")
def selector(algo, func_details, popSize, Iter):
function_name = func_details[0]
lb = func_details[1]
ub = func_details[2]
dim = func_details[3]
if algo == "SSA":
x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "PSO":
x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GA":
x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "BAT":
x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "FFA":
x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "GWO":
x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "WOA":
x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MVO":
x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "MFO":
x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "CS":
x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "HHO":
x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "SCA":
x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "JAYA":
x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
elif algo == "HYBRID":
x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
else:
        return None
return x
def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
"""
It serves as the main interface of the framework for running the experiments.
Parameters
----------
    optimizer : list
        The list of optimizer names
    objectivefunc : list
        The list of benchmark functions
    NumOfRuns : int
        The number of independent runs
    params : dict
        The dictionary of parameters, which are:
        1. Size of population (PopulationSize)
        2. The number of iterations (Iterations)
    export_flags : dict
        The dictionary of Boolean flags, which are:
        1. Export_avg (Exporting the results in a file)
        2. Export_details (Exporting the detailed results in files)
        3. Export_convergence (Exporting the convergence plots)
        4. Export_boxplot (Exporting the box plots)
Returns
-----------
N/A
"""
# Select general parameters for all optimizers (population size, number of iterations) ....
PopulationSize = params["PopulationSize"]
Iterations = params["Iterations"]
# Export results ?
Export = export_flags["Export_avg"]
Export_details = export_flags["Export_details"]
Export_convergence = export_flags["Export_convergence"]
Export_boxplot = export_flags["Export_boxplot"]
Flag = False
Flag_details = False
    # CSV header for the convergence
CnvgHeader = []
results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
Path(results_directory).mkdir(parents=True, exist_ok=True)
for l in range(0, Iterations):
CnvgHeader.append("Iter" + str(l + 1))
for i in range(0, len(optimizer)):
for j in range(0, len(objectivefunc)):
convergence = [0] * NumOfRuns
executionTime = [0] * NumOfRuns
for k in range(0, NumOfRuns):
func_details = benchmarks.getFunctionDetails(objectivefunc[j])
x = selector(optimizer[i], func_details, PopulationSize, Iterations)
convergence[k] = x.convergence
optimizerName = x.optimizer
objfname = x.objfname
if Export_details == True:
ExportToFile = results_directory + "experiment_details.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag_details == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag_details = True # at least one experiment
executionTime[k] = x.executionTime
a = numpy.concatenate(
[[x.optimizer, x.objfname, x.executionTime], x.convergence]
)
writer.writerow(a)
out.close()
if Export == True:
ExportToFile = results_directory + "experiment.csv"
with open(ExportToFile, "a", newline="\n") as out:
writer = csv.writer(out, delimiter=",")
if (
Flag == False
): # just one time to write the header of the CSV file
header = numpy.concatenate(
[["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
)
writer.writerow(header)
Flag = True
avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
avgConvergence = numpy.around(
numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
).tolist()
a = numpy.concatenate(
[[optimizerName, objfname, avgExecutionTime], avgConvergence]
)
writer.writerow(a)
out.close()
if Export_convergence == True:
conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)
if Export_boxplot == True:
box_plot.run(results_directory, optimizer, objectivefunc, Iterations)
    if Flag == False: # Failed to run at least one experiment
        print(
            "No Optimizer or Cost function is selected. Check lists of available optimizers and cost functions"
)
print("Execution completed")
| 1.921875 | 2 |
tests/fields/test_primitive_types.py | slawak/dataclasses-avroschema | 0 | 4517 | import dataclasses
import pytest
from dataclasses_avroschema import fields
from . import consts
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, dataclasses.MISSING)
avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]
assert {"name": name, "type": avro_type} == field.to_dict()
@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types_with_default_value_none(primitive_type):
name = "a_field"
field = fields.Field(name, primitive_type, None)
avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]
assert {"name": name, "type": avro_type, "default": fields.NULL} == field.to_dict()
@pytest.mark.parametrize("primitive_type,default", consts.PRIMITIVE_TYPES_AND_DEFAULTS)
def test_primitive_types_with_default_value(primitive_type, default):
name = "a_field"
field = fields.Field(name, primitive_type, default)
avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]
assert {"name": name, "type": avro_type, "default": default} == field.to_dict()
@pytest.mark.parametrize(
"primitive_type,invalid_default", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(primitive_type, invalid_default):
name = "a_field"
field = fields.Field(name, primitive_type, invalid_default)
msg = f"Invalid default type. Default should be {primitive_type}"
with pytest.raises(AssertionError, match=msg):
field.to_dict()
| 2.53125 | 3 |
Bindings/Python/examples/Moco/examplePredictAndTrack.py | mcx/opensim-core | 532 | 4518 | <reponame>mcx/opensim-core
# -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import os
import math
import opensim as osim
"""
This file performs the following problems using a
double pendulum model:
1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""
visualize = True
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') == '0':
visualize = False
# Create a model of a double pendulum.
# ------------------------------------
def createDoublePendulumModel():
model = osim.Model()
model.setName("double_pendulum")
# Create two links, each with a mass of 1 kg, center of mass at the body's
# origin, and moments and products of inertia of zero.
b0 = osim.Body("b0", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b0)
b1 = osim.Body("b1", 1, osim.Vec3(0), osim.Inertia(1))
model.addBody(b1)
# Add markers to body origin locations.
m0 = osim.Marker("m0", b0, osim.Vec3(0))
m1 = osim.Marker("m1", b1, osim.Vec3(0))
model.addMarker(m0)
model.addMarker(m1)
# Connect the bodies with pin joints. Assume each body is 1 m long.
j0 = osim.PinJoint("j0", model.getGround(), osim.Vec3(0), osim.Vec3(0),
b0, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q0 = j0.updCoordinate()
q0.setName("q0")
j1 = osim.PinJoint("j1",
b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0))
q1 = j1.updCoordinate()
q1.setName("q1")
model.addJoint(j0)
model.addJoint(j1)
tau0 = osim.CoordinateActuator()
tau0.setCoordinate(j0.updCoordinate())
tau0.setName("tau0")
tau0.setOptimalForce(1)
model.addComponent(tau0)
tau1 = osim.CoordinateActuator()
tau1.setCoordinate(j1.updCoordinate())
tau1.setName("tau1")
tau1.setOptimalForce(1)
model.addComponent(tau1)
# Add display geometry.
bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1)
transform = osim.Transform(osim.Vec3(-0.5, 0, 0))
b0Center = osim.PhysicalOffsetFrame("b0_center", b0, transform)
b0.addComponent(b0Center)
b0Center.attachGeometry(bodyGeometry.clone())
b1Center = osim.PhysicalOffsetFrame("b1_center", b1, transform)
b1.addComponent(b1Center)
b1Center.attachGeometry(bodyGeometry.clone())
model.finalizeConnections()
model.printToXML("double_pendulum.osim")
return model
def solvePrediction():
# Predict the optimal trajectory for a minimum time swing-up.
# In the diagram below, + represents the origin, and ---o represents a link
# in the double pendulum.
#
# o
# |
# o
# |
# +---o---o +
#
    #  initial pose              final pose
#
study = osim.MocoStudy()
study.setName("double_pendulum_predict")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
problem.setTimeBounds(0, [0, 5])
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0, 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0, 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: minimize final time and error from desired
# end effector position.
ftCost = osim.MocoFinalTimeGoal()
ftCost.setWeight(0.001)
problem.addGoal(ftCost)
finalCost = osim.MocoMarkerFinalGoal()
finalCost.setName("final")
finalCost.setWeight(1000.0)
finalCost.setPointName("/markerset/m1")
finalCost.setReferenceLocation(osim.Vec3(0, 2, 0))
problem.addGoal(finalCost)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(100)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
guess = solver.createGuess()
guess.setNumTimes(2)
guess.setTime([0, 1])
guess.setState("/jointset/j0/q0/value", [0, -math.pi])
guess.setState("/jointset/j1/q1/value", [0, 2*math.pi])
guess.setState("/jointset/j0/q0/speed", [0, 0])
guess.setState("/jointset/j1/q1/speed", [0, 0])
guess.setControl("/tau0", [0, 0])
guess.setControl("/tau1", [0, 0])
guess.resampleWithNumTimes(10)
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_predict.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_predict_solution.sto")
if visualize:
study.visualize(solution)
return solution
def computeMarkersReference(predictedSolution):
model = createDoublePendulumModel()
model.initSystem()
states = predictedSolution.exportToStatesTable()
statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states)
markerTrajectories = osim.TimeSeriesTableVec3()
markerTrajectories.setColumnLabels(["/markerset/m0", "/markerset/m1"])
for state in statesTraj:
model.realizePosition(state)
m0 = model.getComponent("markerset/m0")
m1 = model.getComponent("markerset/m1")
markerTrajectories.appendRow(state.getTime(),
osim.RowVectorVec3([m0.getLocationInGround(state),
m1.getLocationInGround(state)]))
# Assign a weight to each marker.
markerWeights = osim.SetMarkerWeights()
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0", 1))
markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1", 5))
return osim.MarkersReference(markerTrajectories, markerWeights)
def solveStateTracking(stateRef):
# Predict the optimal trajectory for a minimum time swing-up.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = stateRef.getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-150, 150])
problem.setControlInfo("/tau1", [-150, 150])
# Cost: track provided state data.
stateTracking = osim.MocoStateTrackingGoal()
stateTracking.setReference(osim.TableProcessor(stateRef))
problem.addGoal(stateTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.001)
# TODO problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_states.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_states_solution.sto")
if visualize:
study.visualize(solution)
return solution
def solveMarkerTracking(markersRef, guess):
# Predict the optimal trajectory for a minimum time swing-up.
study = osim.MocoStudy()
study.setName("double_pendulum_track")
problem = study.updProblem()
# Model (dynamics).
problem.setModel(createDoublePendulumModel())
# Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1]
problem.setTimeBounds(0, finalTime)
problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
problem.setControlInfo("/tau0", [-100, 100])
problem.setControlInfo("/tau1", [-100, 100])
# Cost: track provided marker data.
markerTracking = osim.MocoMarkerTrackingGoal()
markerTracking.setMarkersReference(markersRef)
problem.addGoal(markerTracking)
effort = osim.MocoControlGoal()
effort.setName("effort")
effort.setWeight(0.0001)
# problem.addGoal(effort)
# Configure the solver.
solver = study.initTropterSolver()
solver.set_num_mesh_intervals(50)
solver.set_verbosity(2)
solver.set_optim_solver("ipopt")
solver.set_optim_jacobian_approximation("exact")
solver.set_optim_hessian_approximation("exact")
solver.set_exact_hessian_block_sparsity_mode("dense")
solver.setGuess(guess)
# Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_markers.omoco")
# Solve the problem.
solution = study.solve()
solution.write("examplePredictAndTrack_track_markers_solution.sto")
if visualize:
study.visualize(solution)
return solution
optimalTrajectory = solvePrediction()
markersRef = computeMarkersReference(optimalTrajectory)
trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())
trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
| 1.867188 | 2 |
StorageSystem.py | aaronFritz2302/ZoomAuto | 0 | 4519 | <gh_stars>0
import sqlite3
from pandas import DataFrame
conn = sqlite3.connect('./data.db',check_same_thread=False)
class DataBase():
cursor = conn.cursor()
def __init__(self):
self.createTable()
def createTable(self):
'''
        Creates A Table If It Doesn't Exist
'''
conn.execute("""CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)""")
def enterData(self,meetingData):
'''
Enters Data From The UI Table To The DataBase
'''
meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False)
def readData(self):
'''
Reads Data From The SQL DataBase
'''
self.cursor.execute('''SELECT * FROM MeetingData''')
retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video'])
return retVal | 3.75 | 4 |
pymapd/_parsers.py | mflaxman10/pymapd | 0 | 4520 | """
Utility methods for parsing data returned from MapD
"""
import datetime
from collections import namedtuple
from sqlalchemy import text
import mapd.ttypes as T
from ._utils import seconds_to_time
Description = namedtuple("Description", ["name", "type_code", "display_size",
"internal_size", "precision", "scale",
"null_ok"])
ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable",
"precision", "scale",
"comp_param"])
_typeattr = {
'SMALLINT': 'int',
'INT': 'int',
'BIGINT': 'int',
'TIME': 'int',
'TIMESTAMP': 'int',
'DATE': 'int',
'BOOL': 'int',
'FLOAT': 'real',
'DECIMAL': 'real',
'DOUBLE': 'real',
'STR': 'str',
}
_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES
_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES
def _extract_row_val(desc, val):
# type: (T.TColumnType, T.TDatum) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
if val.is_null:
return None
val = getattr(val.val, _typeattr[typename] + '_val')
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
val = (base + datetime.timedelta(seconds=val))
elif typename == 'DATE':
val = (base + datetime.timedelta(seconds=val)).date()
elif typename == 'TIME':
val = seconds_to_time(val)
return val
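# Illustration of the epoch conversion above: for a TIMESTAMP column a raw
# value of 86400 (seconds since 1970-01-01) becomes
# datetime.datetime(1970, 1, 2, 0, 0); for a DATE column the same raw value
# becomes datetime.date(1970, 1, 2).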
def _extract_col_vals(desc, val):
# type: (T.TColumnType, T.TColumn) -> Any
typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]
nulls = val.nulls
vals = getattr(val.data, _typeattr[typename] + '_col')
vals = [None if null else v
for null, v in zip(nulls, vals)]
base = datetime.datetime(1970, 1, 1)
if typename == 'TIMESTAMP':
vals = [None if v is None else base + datetime.timedelta(seconds=v)
for v in vals]
elif typename == 'DATE':
vals = [None if v is None else (base +
datetime.timedelta(seconds=v)).date()
for v in vals]
elif typename == 'TIME':
vals = [None if v is None else seconds_to_time(v) for v in vals]
return vals
def _extract_description(row_desc):
# type: (List[T.TColumnType]) -> List[Description]
"""
Return a tuple of (name, type_code, display_size, internal_size,
precision, scale, null_ok)
https://www.python.org/dev/peps/pep-0249/#description
"""
return [Description(col.col_name, col.col_type.type,
None, None, None, None,
col.col_type.nullable)
for col in row_desc]
def _extract_column_details(row_desc):
# For Connection.get_table_details
return [
ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type],
x.col_type.nullable, x.col_type.precision,
x.col_type.scale, x.col_type.comp_param)
for x in row_desc
]
def _is_columnar(data):
# type: (T.TQueryResult) -> bool
return data.row_set.is_columnar
def _load_schema(buf):
"""
Load a `pyarrow.Schema` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
Returns
-------
schema : pyarrow.Schema
"""
import pyarrow as pa
reader = pa.RecordBatchStreamReader(buf)
return reader.schema
def _load_data(buf, schema):
"""
Load a `pandas.DataFrame` from a buffer written to shared memory
Parameters
----------
buf : pyarrow.Buffer
    schema : pyarrow.Schema
Returns
-------
df : pandas.DataFrame
"""
import pyarrow as pa
message = pa.read_message(buf)
rb = pa.read_record_batch(message, schema)
return rb.to_pandas()
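# Composition sketch (illustrative, not a public API): given the two
# shared-memory buffers produced for an IPC select result -- the buffer names
# here are hypothetical -- the schema is read once and then applied to the
# data buffer:
#
#   schema = _load_schema(schema_buf)
#   df = _load_data(data_buf, schema)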
def _parse_tdf_gpu(tdf):
"""
Parse the results of a select ipc_gpu into a GpuDataFrame
Parameters
----------
tdf : TDataFrame
Returns
-------
gdf : GpuDataFrame
"""
import numpy as np
from pygdf.gpuarrow import GpuArrowReader
from pygdf.dataframe import DataFrame
from numba import cuda
from numba.cuda.cudadrv import drvapi
from .shm import load_buffer
ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle)
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size)
ctx = cuda.current_context()
dptr = ipch.open(ctx)
schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size)
# TODO: extra copy.
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8)
dtype = np.dtype(np.byte)
darr = cuda.devicearray.DeviceNDArray(shape=dptr.size,
strides=dtype.itemsize,
dtype=dtype,
gpu_data=dptr)
reader = GpuArrowReader(schema_buffer, darr)
df = DataFrame()
for k, v in reader.to_dict().items():
df[k] = v
return df
def _bind_parameters(operation, parameters):
return (text(operation)
.bindparams(**parameters)
.compile(compile_kwargs={"literal_binds": True}))
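# Example (illustrative, hypothetical table/column names):
#   compiled = _bind_parameters("SELECT * FROM t WHERE x > :lo", {"lo": 10})
#   str(compiled)  # -> "SELECT * FROM t WHERE x > 10", since literal_binds
#                  #    inlines the bound parameter values.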
| 2.71875 | 3 |
featuretools/entityset/entity.py | rohit901/featuretools | 1 | 4521 | import logging
import warnings
import dask.dataframe as dd
import numpy as np
import pandas as pd
from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
col_is_datetime,
convert_all_variable_data,
convert_variable_data,
get_linked_vars,
infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _dataframes_equal
from featuretools.variable_types import Text, find_variable_types
ks = import_or_none('databricks.koalas')
logger = logging.getLogger('featuretools.entityset')
_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes
class Entity(object):
"""Represents an entity in a Entityset, and stores relevant metadata and data
An Entity is analogous to a table in a relational database
See Also:
:class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`
"""
def __init__(self, id, df, entityset, variable_types=None,
index=None, time_index=None, secondary_time_index=None,
last_time_index=None, already_sorted=False, make_index=False,
verbose=False):
""" Create Entity
Args:
id (str): Id of Entity.
df (pd.DataFrame): Dataframe providing the data for the
entity.
entityset (EntitySet): Entityset for this Entity.
variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of id column in the dataframe.
time_index (str): Name of time column in the dataframe.
secondary_time_index (dict[str -> str]): Dictionary mapping columns
in the dataframe to the time index column they are associated with.
last_time_index (pd.Series): Time index of the last event for each
instance across all child entities.
make_index (bool, optional) : If True, assume index does not exist as a column in
                dataframe, and create a new column of that name using the integers 0 to len(dataframe) - 1.
Otherwise, assume index exists in dataframe.
"""
_validate_entity_params(id, df, time_index)
created_index, index, df = _create_index(index, make_index, df)
self.id = id
self.entityset = entityset
self.data = {'df': df, 'last_time_index': last_time_index}
self.created_index = created_index
self._verbose = verbose
secondary_time_index = secondary_time_index or {}
self._create_variables(variable_types, index, time_index, secondary_time_index)
self.df = df[[v.id for v in self.variables]]
self.set_index(index)
self.time_index = None
if time_index:
self.set_time_index(time_index, already_sorted=already_sorted)
self.set_secondary_time_index(secondary_time_index)
def __repr__(self):
repr_out = u"Entity: {}\n".format(self.id)
repr_out += u" Variables:"
for v in self.variables:
repr_out += u"\n {} (dtype: {})".format(v.id, v.type_string)
shape = self.shape
repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format(
shape[0], shape[1])
return repr_out
@property
def shape(self):
'''Shape of the entity's dataframe'''
return self.df.shape
def __eq__(self, other, deep=False):
if self.index != other.index:
return False
if self.time_index != other.time_index:
return False
if self.secondary_time_index != other.secondary_time_index:
return False
if len(self.variables) != len(other.variables):
return False
if set(self.variables) != set(other.variables):
return False
if deep:
if self.last_time_index is None and other.last_time_index is not None:
return False
elif self.last_time_index is not None and other.last_time_index is None:
return False
elif self.last_time_index is not None and other.last_time_index is not None:
if not self.last_time_index.equals(other.last_time_index):
return False
if not _dataframes_equal(self.df, other.df):
return False
variables = {variable: (variable, ) for variable in self.variables}
for variable in other.variables:
variables[variable] += (variable, )
for self_var, other_var in variables.values():
if not self_var.__eq__(other_var, deep=True):
return False
return True
def __sizeof__(self):
return sum([value.__sizeof__() for value in self.data.values()])
@property
def df(self):
'''Dataframe providing the data for the entity.'''
return self.data["df"]
@df.setter
def df(self, _df):
self.data["df"] = _df
@property
def last_time_index(self):
'''
Time index of the last event for each instance across all child entities.
'''
return self.data["last_time_index"]
@last_time_index.setter
def last_time_index(self, lti):
self.data["last_time_index"] = lti
def __hash__(self):
return id(self.id)
def __getitem__(self, variable_id):
return self._get_variable(variable_id)
def _get_variable(self, variable_id):
"""Get variable instance
Args:
variable_id (str) : Id of variable to get.
Returns:
:class:`.Variable` : Instance of variable.
Raises:
            KeyError : if no variable exists with the provided id
"""
for v in self.variables:
if v.id == variable_id:
return v
raise KeyError("Variable: %s not found in entity" % (variable_id))
@property
def variable_types(self):
'''Dictionary mapping variable id's to variable types'''
return {v.id: type(v) for v in self.variables}
def convert_variable_type(self, variable_id, new_type,
convert_data=True,
**kwargs):
"""Convert variable in dataframe to different type
Args:
variable_id (str) : Id of variable to convert.
new_type (subclass of `Variable`) : Type of variable to convert to.
entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.
convert_data (bool) : If True, convert underlying data in the EntitySet.
Raises:
RuntimeError : Raises if it cannot convert the underlying data
Examples:
>>> from featuretools.tests.testing_utils import make_ecommerce_entityset
>>> es = make_ecommerce_entityset()
>>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
"""
if convert_data:
# first, convert the underlying data (or at least try to)
self.df = convert_variable_data(df=self.df,
column_id=variable_id,
new_type=new_type,
**kwargs)
# replace the old variable with the new one, maintaining order
variable = self._get_variable(variable_id)
new_variable = new_type.create_from(variable)
self.variables[self.variables.index(variable)] = new_variable
def _create_variables(self, variable_types, index, time_index, secondary_time_index):
"""Extracts the variables from a dataframe
Args:
variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
index (str): Name of index column
time_index (str or None): Name of time_index column
secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
that each map to a list of columns that depend on that secondary time
"""
variables = []
variable_types = variable_types.copy() or {}
string_to_class_map = find_variable_types()
# TODO: Remove once Text has been removed from variable types
string_to_class_map[Text.type_string] = Text
for vid in variable_types.copy():
vtype = variable_types[vid]
if isinstance(vtype, str):
if vtype in string_to_class_map:
variable_types[vid] = string_to_class_map[vtype]
else:
variable_types[vid] = string_to_class_map['unknown']
warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype))
if index not in variable_types:
variable_types[index] = vtypes.Index
link_vars = get_linked_vars(self)
inferred_variable_types = infer_variable_types(self.df,
link_vars,
variable_types,
time_index,
secondary_time_index)
inferred_variable_types.update(variable_types)
for v in inferred_variable_types:
# TODO document how vtype can be tuple
vtype = inferred_variable_types[v]
if isinstance(vtype, tuple):
# vtype is (ft.Variable, dict_of_kwargs)
_v = vtype[0](v, self, **vtype[1])
else:
_v = inferred_variable_types[v](v, self)
variables += [_v]
# convert data once we've inferred
self.df = convert_all_variable_data(df=self.df,
variable_types=inferred_variable_types)
# make sure index is at the beginning
index_variable = [v for v in variables
if v.id == index][0]
self.variables = [index_variable] + [v for v in variables
if v.id != index]
def update_data(self, df, already_sorted=False,
recalculate_last_time_indexes=True):
        '''Update entity's internal dataframe, optionally making sure data is sorted,
reference indexes to other entities are consistent, and last_time_indexes
are consistent.
'''
if len(df.columns) != len(self.variables):
raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns),
len(self.variables)))
for v in self.variables:
if v.id not in df.columns:
raise ValueError("Updated dataframe is missing new {} column".format(v.id))
# Make sure column ordering matches variable ordering
self.df = df[[v.id for v in self.variables]]
self.set_index(self.index)
if self.time_index is not None:
self.set_time_index(self.time_index, already_sorted=already_sorted)
self.set_secondary_time_index(self.secondary_time_index)
if recalculate_last_time_indexes and self.last_time_index is not None:
self.entityset.add_last_time_indexes(updated_entities=[self.id])
self.entityset.reset_data_description()
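    # Usage sketch (hypothetical entity id and replacement frame):
    #   es["transactions"].update_data(new_transactions_df,
    #                                  recalculate_last_time_indexes=True)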
def add_interesting_values(self, max_values=5, verbose=False):
"""
Find interesting values for categorical variables, to be used to
generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for variable in self.variables:
# some heuristics to find basic 'where'-able variables
if isinstance(variable, vtypes.Discrete):
variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype)
# TODO - consider removing this constraints
# don't add interesting values for entities in relationships
skip = False
for r in self.entityset.relationships:
if variable in [r.child_variable, r.parent_variable]:
skip = True
break
if skip:
continue
counts = self.df[variable.id].value_counts()
# find how many of each unique value there are; sort by count,
# and add interesting values to each variable
total_count = np.sum(counts)
counts[:] = counts.sort_values()[::-1]
for i in range(min(max_values, len(counts.index))):
idx = counts.index[i]
# add the value to interesting_values if it represents more than
# 25% of the values we have not seen so far
if len(counts.index) < 25:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
else:
fraction = counts[idx] / total_count
if fraction > 0.05 and fraction < 0.95:
if verbose:
msg = "Variable {}: Marking {} as an "
msg += "interesting value"
logger.info(msg.format(variable.id, idx))
variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))
# total_count -= counts[idx]
else:
break
self.entityset.reset_data_description()
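    # Usage sketch (mirroring the ecommerce example in convert_variable_type
    # above; the "sessions" entity id is an assumption):
    #   es = make_ecommerce_entityset()
    #   es["sessions"].add_interesting_values(max_values=3, verbose=True)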
def delete_variables(self, variable_ids):
"""
Remove variables from entity's dataframe and from
self.variables
Args:
variable_ids (list[str]): Variables to delete
Returns:
None
"""
# check if variable is not a list
if not isinstance(variable_ids, list):
raise TypeError('variable_ids must be a list of variable names')
if len(variable_ids) == 0:
return
self.df = self.df.drop(variable_ids, axis=1)
for v_id in variable_ids:
v = self._get_variable(v_id)
self.variables.remove(v)
def set_time_index(self, variable_id, already_sorted=False):
# check time type
if not isinstance(self.df, pd.DataFrame) or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype]
else:
time_to_check = self.df[variable_id].iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type is None:
self.entityset.time_type = time_type
elif self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if is_instance(self.df, (dd, ks), 'DataFrame'):
t = time_type # skip checking values
already_sorted = True # skip sorting
else:
t = vtypes.NumericTimeIndex
if col_is_datetime(self.df[variable_id]):
t = vtypes.DatetimeTimeIndex
# use stable sort
if not already_sorted:
# sort by time variable, then by index
self.df = self.df.sort_values([variable_id, self.index])
self.convert_variable_type(variable_id, t, convert_data=False)
self.time_index = variable_id
def set_index(self, variable_id, unique=True):
"""
Args:
variable_id (string) : Name of an existing variable to set as index.
unique (bool) : Whether to assert that the index is unique.
"""
if isinstance(self.df, pd.DataFrame):
self.df = self.df.set_index(self.df[variable_id], drop=False)
self.df.index.name = None
if unique:
assert self.df.index.is_unique, "Index is not unique on dataframe " \
"(Entity {})".format(self.id)
self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
self.index = variable_id
def set_secondary_time_index(self, secondary_time_index):
for time_index, columns in secondary_time_index.items():
if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty:
time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype]
else:
time_to_check = self.df[time_index].head(1).iloc[0]
time_type = _check_time_type(time_to_check)
if time_type is None:
raise TypeError("%s time index not recognized as numeric or"
" datetime" % (self.id))
if self.entityset.time_type != time_type:
raise TypeError("%s time index is %s type which differs from"
" other entityset time indexes" %
(self.id, time_type))
if time_index not in columns:
columns.append(time_index)
self.secondary_time_index = secondary_time_index
def _create_index(index, make_index, df):
    '''Handles index creation logic based on user input'''
created_index = None
if index is None:
# Case 1: user wanted to make index but did not specify column name
assert not make_index, "Must specify an index name if make_index is True"
# Case 2: make_index not specified but no index supplied, use first column
warnings.warn(("Using first column as index. "
"To change this, specify the index parameter"))
index = df.columns[0]
elif make_index and index in df.columns:
# Case 3: user wanted to make index but column already exists
raise RuntimeError("Cannot make index: index variable already present")
elif index not in df.columns:
if not make_index:
            # Case 4: user named an index that is not in df and did not
            # specify make_index. Make a new integer index column and warn
warnings.warn("index {} not found in dataframe, creating new "
"integer column".format(index))
# Case 5: make_index with no errors or warnings
# (Case 4 also uses this code path)
if isinstance(df, dd.DataFrame):
df[index] = 1
df[index] = df[index].cumsum() - 1
elif is_instance(df, ks, 'DataFrame'):
df = df.koalas.attach_id_column('distributed-sequence', index)
else:
df.insert(0, index, range(len(df)))
created_index = index
# Case 6: user specified index, which is already in df. No action needed.
return created_index, index, df
def _validate_entity_params(id, df, time_index):
'''Validation checks for Entity inputs'''
assert isinstance(id, str), "Entity id must be a string"
assert len(df.columns) == len(set(df.columns)), "Duplicate column names"
for c in df.columns:
if not isinstance(c, str):
raise ValueError("All column names must be strings (Column {} "
"is not a string)".format(c))
if time_index is not None and time_index not in df.columns:
raise LookupError('Time index not found in dataframe')
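# Minimal usage sketch for the helpers above (illustrative only; it assumes a
# plain pandas DataFrame, so the dask/koalas branches of _create_index are not
# exercised and only the already-existing-index case is shown).
if __name__ == "__main__":
    example_df = pd.DataFrame({"id": [0, 1, 2], "value": [1.0, 2.0, 3.0]})
    _validate_entity_params(id="transactions", df=example_df, time_index=None)
    # Case 6: the index column already exists and make_index is False,
    # so no new column is created.
    created, index_name, example_df = _create_index(index="id", make_index=False, df=example_df)
    assert created is None and index_name == "id"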
| 2.890625 | 3 |
githubdl/url_helpers.py | wilvk/githubdl | 16 | 4522 | <gh_stars>10-100
import re
from urllib.parse import urlparse
import logging
def check_url_is_http(repo_url):
predicate = re.compile('^https?://.*$')
match = predicate.search(repo_url)
return False if match is None else True
def check_url_is_ssh(repo_url):
predicate = re.compile(r'^git\@.*\.git$')
match = predicate.search(repo_url)
return False if match is None else True
def get_domain_name_from_http_url(repo_url):
site_object = urlparse(repo_url)
return site_object.netloc
def get_repo_name_from_http_url(repo_url):
site_object = urlparse(repo_url)
parsed_string = re.sub(r'\.git$', '', site_object.path)
if parsed_string[0] == '/':
return parsed_string[1:]
return parsed_string
def get_repo_name_from_ssh_url(repo_url):
predicate = re.compile(r'(?<=\:)(.*)(?=\.)')
match = predicate.search(repo_url)
return match.group()
def get_domain_name_from_ssh_url(repo_url):
predicate = re.compile(r'(?<=\@)(.*)(?=\:)')
match = predicate.search(repo_url)
return match.group()
def validate_protocol_exists(is_ssh, is_http):
if not is_ssh and not is_http:
err_message = "Error: repository url provided is not http(s) or ssh"
logging.critical(err_message)
raise RuntimeError(err_message)
def check_url_protocol(repo_url):
is_ssh = check_url_is_ssh(repo_url)
is_http = check_url_is_http(repo_url)
validate_protocol_exists(is_ssh, is_http)
return (is_ssh, is_http)
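# Minimal self-check sketch (the URLs below are illustrative, not part of the API):
if __name__ == "__main__":
    ssh_url = "git@github.com:wilvk/githubdl.git"
    http_url = "https://github.com/wilvk/githubdl.git"
    assert check_url_protocol(ssh_url) == (True, False)
    assert check_url_protocol(http_url) == (False, True)
    assert get_repo_name_from_ssh_url(ssh_url) == "wilvk/githubdl"
    assert get_domain_name_from_ssh_url(ssh_url) == "github.com"
    assert get_repo_name_from_http_url(http_url) == "wilvk/githubdl"
    assert get_domain_name_from_http_url(http_url) == "github.com"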
| 2.9375 | 3 |
RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py | AlsikeE/Ez | 0 | 4523 | <reponame>AlsikeE/Ez
import itertools
from ez_lib import ez_flow_tool
from collections import defaultdict
from ez_scheduler import EzScheduler
from ez_lib.ez_ob import CenUpdateInfo, UpdateNext
from misc import constants, logger
from domain.message import *
from collections import deque
from misc import global_vars
import time
import eventlet
mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL)
class CenCtrlScheduler(EzScheduler):
def __init__(self, switches_, log_):
self.switches = switches_
super(CenCtrlScheduler, self).__init__(0, log_)
self.remaining_vol_of_dependency_loop_on_link = {}
self.received_updated_msg = defaultdict()
self.received_removed_msg = defaultdict()
########## Begin three properties are used for parallel processes ##########
self.no_of_pending_msgs = {}
self.notification_queues = {x: deque([]) for x in self.switches}
self.current_notification_time = {x: -1 for x in self.switches}
self.current_processing_time = {x: -1 for x in self.switches}
########### End three properties are used for parallel processes ###########
self.to_sames = defaultdict(list)
self.encounter_deadlock = False
self.do_segmentation = True
def reset(self):
super(CenCtrlScheduler, self).reset()
self.remaining_vol_of_dependency_loop_on_link = {}
self.received_updated_msg = defaultdict()
self.received_removed_msg = defaultdict()
########## Begin three properties are used for parallel processes ##########
self.no_of_pending_msgs = {}
self.notification_queues = {x: deque([]) for x in self.switches}
self.current_notification_time = {x: -1 for x in self.switches}
self.current_processing_time = {x: -1 for x in self.switches}
########### End three properties are used for parallel processes ###########
self.to_sames = defaultdict(list)
self.encounter_deadlock = False
self.do_segmentation = True
def __str__(self):
return "Centralized Controller"
@staticmethod
def init_logger():
return logger.getLogger("Centralized Controller", constants.LOG_LEVEL)
def create_dependency_graph(self, old_flows, new_flows):
time_start_computing = time.time() * 1000
ez_flow_tool.create_dependency_graph(old_flows, new_flows,
self.links_by_endpoints, self.segments_by_seg_path_id,
self.to_sames, do_segmentation=self.do_segmentation)
self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id)
self.log.debug(self.links_by_endpoints)
self.log.debug(self.segments_by_seg_path_id)
mulog.info("links by endpoints %s segs_by_segpath_id %s" % (self.links_by_endpoints,self.segments_by_seg_path_id))
# self.log.info("time to compute dependency graph: %s" % str(time() * 1000 - time_start_computing))
def process_coherent(self):
send_to_sames = set()
for key in self.to_sames.keys():
to_same = self.to_sames[key]
for sw in to_same:
send_to_sames.add(sw)
# for sw in send_to_sames:
# msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0)
# self.send_to_switch(msg, sw)
def compute_required_vol_for_dependency_loop(self, link):
self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0
for add_op in link.to_adds_loop:
self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \
+= self.segments_by_seg_path_id[add_op.seg_path_id].vol
def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id):
# pool = eventlet.GreenPool()
mulog.info("start finding dependency loop and sort updates")
mulog.info(links_by_endpoints)
for sw in self.switches:
# pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw,
# links_by_endpoints, segments_by_seg_path_id)
self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id)
# pool.waitall()
# for link in links_by_endpoints.values():
# ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id)
# global_vars.finish_prioritizing_time = time.clock()
def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id):
for link in links_by_endpoints.values():
if link.src == sw:
ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id)
for link in links_by_endpoints.values():
if link.src == sw:
self.compute_required_vol_for_dependency_loop(link)
current_time = time.clock()
if global_vars.finish_computation_time < current_time:
global_vars.finish_computation_time = time.clock()
def execute_all_remove_only_updates(self, update_infos):
for l_segment in self.segments_by_seg_path_id.values():
old_sws = set(l_segment.old_seg)
old_sws.add(l_segment.init_sw)
seg_path_id = l_segment.seg_path_id
self.received_removed_msg[seg_path_id] = set()
if l_segment.remove_only:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
for sw in old_sws:
update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id,
sw, constants.REMOVE_NEXT)
l_segment.update_status = constants.SENT_REMOVING
def update_message_queues(self, update_infos, process_update_info_func):
increased = set()
related_sws = set([])
for key in update_infos.keys():
update_info = update_infos[key]
# self.logger.info("Process update info %s at %d ms from starting" % (update_info, (time() - self.current_start_time)*1000))
assert update_info, CenUpdateInfo
for sw in update_infos[key].update_nexts.keys():
if sw not in increased:
self.current_notification_time[sw] += 1
increased.add(sw)
self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0
#update_next = update_info.update_nexts[sw]
process_update_info_func(sw, update_info)
self.log.debug("add message in processing update_info: %s" % update_info)
self.log.debug("pending messages: %s" % str(self.no_of_pending_msgs))
related_sws.add(sw) #self.datapaths[sw + 1])
return related_sws
def increase_processing_time(self, sw):
self.current_processing_time[sw] += 1
def enque_msg_to_notification_queue(self, sw, msg):
self.notification_queues[sw].append(msg)
self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1
def deque_msg_from_notification_queue(self, sw):
msg = self.notification_queues[sw].popleft()
self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1
return msg
def has_pending_msg_of_sw(self, sw):
return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0
# def check_all_capable_for_link(self, link, executable_segments_by_link):
# capable_segments = []
# done_loop = True
# endpoints = (link.src, link.dst)
# total_vol = 0
# for op in link.to_adds_loop:
# l_segment = self.segments_by_seg_path_id[op.seg_path_id]
# if l_segment.update_status == constants.NOTHING:
# done_loop = False
# total_vol += l_segment.vol
#
# def check_and_send_possible_update_by_link(self, update_infos):
# executable_segments_by_link = {}
# executable_link_by_segments = {}
# for link in self.links_by_endpoints.values():
# self.check_all_capable_for_link(link, executable_segments_by_link)
def total_pending_cycle_vol(self, link):
total_vol = 0
for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only:
total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol
return total_vol
    def check_to_split(self, link, l_segment):
        # Splitting is not implemented yet; return a (splittable, split_vol)
        # tuple so splittable_vol() can safely unpack the result.
        return False, 0
def splittable_vol(self, seg_path_id):
# TODO: Update remaining_vol_of_loop when adding or removing segment
final_split_vol = 0
l_segment = self.segments_by_seg_path_id[seg_path_id]
for endpoints in l_segment.new_link_seg:
link = self.links_by_endpoints[endpoints]
is_add_only = False
for op in link.to_adds_only:
if op.seg_path_id == seg_path_id:
return 0
splittable, split_vol = self.check_to_split(link, l_segment)
if splittable and final_split_vol > split_vol > 0:
final_split_vol = split_vol
self.log.debug("capable %s" % l_segment)
return final_split_vol
def check_and_send_possible_split_updates(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
split_vol = self.splittable_vol(l_segment.seg_path_id)
if split_vol > 0:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
# self.log.debug("send to sw%s" % str(l_segment.new_seg[i]))
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
l_segment.is_splitting = True
for pair in l_segment.new_link_seg:
self.log.info("avail_cap of link %s: %f, "
"give %f to segment %s" % (str(pair),
self.links_by_endpoints[pair].avail_cap,
l_segment.vol,
str(l_segment.seg_path_id)))
self.links_by_endpoints[pair].avail_cap -= split_vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_possible_update_by_links(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
for pair in l_segment.new_link_seg:
self.links_by_endpoints[pair].avail_cap -= l_segment.vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_and_send_possible_updates(self, update_infos):
has_execution = True
while has_execution:
has_execution = False
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status != constants.NOTHING:
continue
seg_path_id = l_segment.seg_path_id
self.log.debug(l_segment)
mulog.info("chk&send psb_uds for linksegment %s"%l_segment)
if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
l_segment.flow_src,
l_segment.flow_dst)
update_info = update_infos[seg_path_id]
update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
l_segment.new_seg[0],
constants.UPDATE_NEXT)
for i in range(len(l_segment.new_seg) - 1):
next_sw = l_segment.new_seg[i + 1]
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.ADD_NEXT)
self.received_updated_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_ADDING
for pair in l_segment.new_link_seg:
self.links_by_endpoints[pair].avail_cap -= l_segment.vol
for u_op in self.links_by_endpoints[pair].to_adds_loop:
if u_op.seg_path_id == l_segment.seg_path_id:
self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol
count = 0
for l_segment in self.segments_by_seg_path_id.values():
if l_segment.update_status == constants.NOTHING:
count += 1
self.log.debug("number of flows that is not done anything %d" % count)
def check_and_do_next_update(self, msg):
update_infos = defaultdict(CenUpdateInfo)
if not self.received_updated_msg.has_key(msg.seg_path_id):
self.received_updated_msg[msg.seg_path_id] = set()
self.received_updated_msg[msg.seg_path_id].add(msg.src_id)
self.log.debug("handle updated msg %s" % msg)
assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True
link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
# self.log.info("receive updated msgs for segment %s, new_seg_length = %d"
# % (str(link_segment.seg_path_id), len(link_segment.new_seg)))
if link_segment.update_status == constants.SENT_ADDING \
and len(self.received_updated_msg[msg.seg_path_id]) == \
len(link_segment.new_seg):
self.finish_adding_new_path(link_segment, update_infos)
return update_infos
def finish_adding_new_path(self, link_segment, update_infos):
self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000
if len(link_segment.old_seg) < 1:
link_segment.update_status = constants.FINISH_ALL
else:
# self.log.info("receive enough updated msgs for segment %s" % str(link_segment.seg_path_id))
link_segment.update_status = constants.FINISH_ADDING
self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment)
def remove_segment_and_check_to_update(self, msg):
assert isinstance(msg, NotificationMessage)
update_infos = defaultdict(CenUpdateInfo)
self.log.debug("handle removed msg %s" % msg)
self.received_removed_msg[msg.seg_path_id].add(msg.src_id)
link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
next_idx = 0
if msg.src_id != link_segment.init_sw:
next_idx = link_segment.old_seg.index(msg.src_id) + 1
if next_idx < len(link_segment.old_seg):
dst = link_segment.old_seg[next_idx]
pair = (msg.src_id, dst)
self.links_by_endpoints[pair].avail_cap += link_segment.vol
# self.log.info("avail_cap of link %d->%d: %f, "
# "get from segment %s" % (msg.src_id, dst,
# self.links_by_endpoints[pair].avail_cap,
# str(link_segment.seg_path_id)))
if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1:
link_segment.update_status = constants.FINISH_ALL
self.log.debug("finish %s" % str(link_segment.seg_path_id))
self.check_and_send_possible_updates(update_infos)
return update_infos
def check_finish_update(self):
count = 0
finished = True
for link_segment in self.segments_by_seg_path_id.values():
if link_segment.update_status != constants.FINISH_ALL:
update_status = ''
if link_segment.update_status == constants.NOTHING:
count += 1
update_status = "NOTHING"
if link_segment.update_status == constants.SENT_ADDING:
self.log.debug("must receive %d more UPDATED msgs" % (len(link_segment.new_seg)-1))
self.log.debug("received from: %s" % self.received_updated_msg[link_segment.seg_path_id])
update_status = "SENT_ADDING"
elif link_segment.update_status == constants.SENT_REMOVING:
self.log.debug("must receive %d more REMOVED msgs" % (len(link_segment.old_seg)-1))
self.log.debug("received from: %s" % self.received_removed_msg[link_segment.seg_path_id])
update_status = "SENT REMOVING"
elif link_segment.update_status == constants.FINISH_ADDING:
update_status = "FINISH_ADDING"
elif link_segment.update_status == constants.FINISH_REMOVING:
update_status = "FINISH_REMOVING"
self.log.debug("segment %s is not finished! update_status %s." % (str(link_segment.seg_path_id), update_status))
# return False
finished = False
break
has_no_pending_barrier = self.has_not_pending_msg()
if not has_no_pending_barrier:
return constants.ON_GOING
elif not finished:
self.log.debug("number of flows that is not done anything %d" % count)
self.scheduling_mode = constants.CONGESTION_MODE
return constants.ENCOUNTER_DEADLOCK
else:
current_mode = self.scheduling_mode
self.scheduling_mode = constants.NORMAL_MODE
if current_mode == constants.CONGESTION_MODE:
return constants.FINISHED_WITH_DEADLOCK
else:
return constants.FINISHED_WITHOUT_DEADLOCK
def has_not_pending_msg(self):
self.log.debug("pending queue: %s" % str(self.no_of_pending_msgs))
for queue_len in self.no_of_pending_msgs.values():
if queue_len > 0:
return False
return True
def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment):
seg_path_id = l_segment.seg_path_id
if not update_infos.has_key(seg_path_id):
update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,
l_segment.flow_dst)
pair = (l_segment.init_sw, l_segment.old_seg[0])
self.links_by_endpoints[pair].avail_cap += l_segment.vol
# self.log.info("avail_cap of link %d->%d: %f, "
# "get from segment %s" % (l_segment.init_sw,
# l_segment.old_seg[0],
# self.links_by_endpoints[pair].avail_cap,
# str(l_segment.seg_path_id)))
if len(l_segment.old_seg) > 1:
for i in range(len(l_segment.old_seg) - 1):
# self.log.debug("send to: %s" % l_segment.old_seg[i])
next_sw = l_segment.old_seg[i + 1]
update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id,
next_sw,
constants.REMOVE_NEXT)
self.received_removed_msg[l_segment.seg_path_id] = set()
l_segment.update_status = constants.SENT_REMOVING
else:
l_segment.update_status = constants.FINISH_ALL
def are_all_moving_in_ops_finished(self, link):
for u_op in link.to_adds + link.to_adds_loop:
current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status
if current_state == constants.NOTHING \
or current_state == constants.SENT_ADDING:
return False
return True
def is_capable(self, seg_path_id):
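        # A segment can be scheduled only if, on every link of its new path:
        #   * for a non-loop op, the capacity left after adding the segment
        #     still covers the remaining dependency-loop volume on that link;
        #   * for a dependency-loop op, the link simply has room for the segment;
        #   * for an add-only op, every moving-in op on the link has finished
        #     and the link has room for the segment.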
# TODO: Update remaining_vol_of_loop when adding or removing segment
l_segment = self.segments_by_seg_path_id[seg_path_id]
for endpoints in l_segment.new_link_seg:
link = self.links_by_endpoints[endpoints]
is_dependency_loop_op = False
for op in link.to_adds_loop:
if op.seg_path_id == seg_path_id:
is_dependency_loop_op = True
break
is_add_only = False
for op in link.to_adds_only:
if op.seg_path_id == seg_path_id:
is_add_only = True
break
if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol
< self.remaining_vol_of_dependency_loop_on_link[endpoints])) \
or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\
or (is_add_only and (not self.are_all_moving_in_ops_finished(link)
or link.avail_cap < l_segment.vol)):
return False
self.log.debug("capable %s" % l_segment)
return True
| 1.929688 | 2 |
src/trackbar.py | clovadev/opencv-python | 0 | 4524 | <reponame>clovadev/opencv-python
import numpy as np
import cv2 as cv
def nothing(x):
pass
# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')
# create trackbars for color change
cv.createTrackbar('R', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('B', 'image', 0, 255, nothing)
# create switch for ON/OFF functionality
switch = 'OFF/ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)
while True:
# get current positions of four trackbars
r = cv.getTrackbarPos('R', 'image')
g = cv.getTrackbarPos('G', 'image')
b = cv.getTrackbarPos('B', 'image')
s = cv.getTrackbarPos(switch, 'image')
    # If the switch is OFF, keep the image black; if ON, fill it with the selected color
if s == 0:
img[:] = 0
else:
img[:] = [b, g, r]
    # Display the image
cv.imshow('image', img)
if cv.waitKey(10) > 0:
break
cv.destroyAllWindows()
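# Drag the R/G/B trackbars and flip the OFF/ON switch to change the fill color;
# press any key in the image window to exit.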
| 3.28125 | 3 |
aoc_2015/src/day20.py | ambertests/adventofcode | 0 | 4525 | <reponame>ambertests/adventofcode
from functools import reduce
# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
def factors(n):
step = 2 if n%2 else 1
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5)+1, step) if not n % i)))
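# e.g. factors(12) == {1, 2, 3, 4, 6, 12}; for odd n the step of 2 skips even candidates.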
def solve(target):
house_count = 0
deliveries = {}
complete = set()
pt1 = 0
pt2 = 0
while pt1 == 0 or pt2 == 0:
house_count += 1
gifts1 = 0
gifts2 = 0
elves = factors(house_count)
if pt1 == 0:
gifts1 = sum(elves)*10
if gifts1 >= target:
pt1 = house_count
if pt2 == 0:
working = elves.difference(complete)
for elf in working:
if elf in deliveries:
deliveries[elf] += 1
if deliveries[elf] == 50:
complete.add(elf)
else:
deliveries[elf] = 1
gifts2 = sum(working)*11
if gifts2 >= target:
pt2 = house_count
return pt1, pt2
# takes around 20s
pt1, pt2 = solve(29000000)
print("Part 1:", pt1)
print("Part 2:", pt2)
| 3.703125 | 4 |
setup.py | jean/labels | 1 | 4526 | <reponame>jean/labels<gh_stars>1-10
import pathlib
import setuptools
def read(*args: str) -> str:
file_path = pathlib.Path(__file__).parent.joinpath(*args)
return file_path.read_text("utf-8")
setuptools.setup(
name="labels",
version="0.3.0.dev0",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="MIT",
url="https://github.com/hackebrot/labels",
project_urls={
"Repository": "https://github.com/hackebrot/labels",
"Issues": "https://github.com/hackebrot/labels/issues",
},
description="CLI app for managing GitHub labels for Python 3.6 and newer. 📝",
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
install_requires=["click", "requests", "pytoml", "attrs"],
entry_points={"console_scripts": ["labels = labels.cli:labels"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
keywords=["github", "command-line"],
)
| 2.015625 | 2 |
colab/__init__.py | caseywstark/colab | 1 | 4527 | <gh_stars>1-10
# -*- coding: utf-8 -*-
__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.
In 0.5 this was called "complete_project".
"""
| 1.445313 | 1 |
src/ralph/ui/forms/util.py | quamilek/ralph | 0 | 4528 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.business.models import Venture, VentureRole
def all_ventures():
yield '', '---------'
for v in Venture.objects.filter(show_in_ralph=True).order_by('path'):
yield (
v.id,
"%s[%s] %s" % (
'\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break space'
v.symbol,
v.name,
)
)
def all_roles():
yield '', '---------'
for r in VentureRole.objects.order_by(
'-venture__is_infrastructure', 'venture__name',
'parent__parent__name', 'parent__name', 'name'
):
yield r.id, '{} / {}'.format(r.venture.name, r.full_name)
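# These generators are typically consumed lazily as Django form choices, e.g.
# (a sketch, field name assumed): forms.ChoiceField(choices=list(all_ventures()))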
| 2.078125 | 2 |
tests/syntax/missing_in_with_for.py | matan-h/friendly | 287 | 4529 | <filename>tests/syntax/missing_in_with_for.py
for x range(4):
print(x)
| 1.726563 | 2 |
services/users/manage.py | eventprotocol/event-protocol-webapp | 0 | 4530 | """
manage.py for the Flask application
"""
import unittest
import coverage
import os
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.models import User
# Code coverage
COV = coverage.Coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/config.py',
]
)
COV.start()
app = create_app()
cli = FlaskGroup(create_app=create_app)
@cli.command()
def cov():
"""
Runs the unit tests with coverage
"""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return -1
@cli.command()
def recreate_db():
"""
Destroys all db and recreates a new db
"""
db.drop_all()
db.create_all()
db.session.commit()
@cli.command()
def test():
"""
Runs test without code coverage
"""
tests = unittest.TestLoader().discover(
'project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
else:
return -1
@cli.command()
def seed_db():
"""
Seeds the database with some initial data
"""
user1 = User(
eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower())
user1.username = "Meeting Room Of The Century"
user1.email = "<EMAIL>"
user1.city_country = "Singapore, SG"
user1.tags = "Meeting Spaces"
user1.about = '''This is the best meeting space you will ever see'''
user1.seller_detail = '''We sell space'''
user1.buyer_detail = '''We are not buying'''
user2 = User(
eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower())
user2.username = "Makeup Till You Breakup"
user2.email = "<EMAIL>"
user2.city_country = "Singapore, SG"
user2.tags = "Stylist"
user2.about = '''Reimagine your looks with us'''
user2.seller_detail = '''We are serving looks tonight'''
user2.buyer_detail = '''We are not buying'''
user3 = User(
eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower())
user3.username = "Heart Attack Buffet"
user3.email = "<EMAIL>"
user3.city_country = "Singapore, SG"
user3.tags = "Buffet"
user3.about = '''Eat till you get a heart attack'''
user3.seller_detail = '''We sell food'''
user3.buyer_detail = '''We are not buying'''
user4 = User(
eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower())
user4.username = "Pleasant Photography"
user4.email = "<EMAIL>"
user4.city_country = "Singapore, SG"
user4.tags = "Photography"
    user4.about = ('We are a group of photographers specialized in wedding '
'photography. '
'We have won numerous awards for our photos. '
'We will capture your '
'memories in ways you cannot imagine.')
user4.seller_detail = '''We sell photos'''
user4.buyer_detail = '''We are not buying'''
user5 = User(
eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower())
user5.username = "Epic Winebar"
user5.email = "<EMAIL>"
user5.city_country = "Singapore, SG"
user5.tags = "Bar, Restaurant"
    user5.about = ('Award winning winebar with the best selection of alcohol. '
                   'We serve delicious international cuisine, with fusion '
                   'dishes inspired from our travels. We are always ready for '
                   'your craziest events.')
user5.seller_detail = '''We sell wine'''
user5.buyer_detail = '''We are not buying'''
user6 = User(
eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower())
user6.username = "Dancers Who Dance"
user6.email = "<EMAIL>"
user6.city_country = "Singapore, SG"
user6.tags = "Performer"
    user6.about = ('Dancers who dance are people who like to dance a lot. '
                   'Give us music and we will dance for you.')
user6.seller_detail = '''We sell dance'''
user6.buyer_detail = '''We are not buying'''
db.session.add(user1)
db.session.add(user2)
db.session.add(user3)
db.session.add(user4)
db.session.add(user5)
db.session.add(user6)
db.session.commit()
if __name__ == '__main__':
cli()
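# Typical invocation (a sketch): `python manage.py recreate_db`, then
# `python manage.py seed_db`, and `python manage.py cov` to run tests with coverage.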
| 2.6875 | 3 |
keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py | erelcan/keras-transformer | 3 | 4531 | <gh_stars>1-10
import os
from keras.callbacks import ModelCheckpoint
from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC
from keras_transformer.utils.io_utils import save_to_pickle
class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC):
def __init__(self, workspace_path, artifacts, callbacks, **kwargs):
super().__init__(os.path.join(workspace_path, "model-{epoch:01d}.h5"), **kwargs)
self._workspace_path = workspace_path
self._artifacts = artifacts
self._completed_epoch = 0
self._callbacks = callbacks
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs)
self._completed_epoch += 1
self.update_artifacts()
should_save = False
if self.epochs_since_last_save == 0:
if self.save_best_only:
current = logs.get(self.monitor)
if current == self.best:
should_save = True
else:
should_save = True
if should_save:
save_to_pickle(self._artifacts, os.path.join(self._workspace_path, "artifacts-" + str(epoch+1) + ".pkl"))
def update_artifacts(self):
for callback in self._callbacks:
self._artifacts["callbacks"][callback.get_name()] = callback.get_artifacts()
self._artifacts["callbacks"][self.get_name()] = self.get_artifacts()
def get_name(self):
return self.__class__.__name__
def get_artifacts(self):
return {"best_score": self.best, "completed_epoch": self._completed_epoch}
def prepare_from_artifacts(self, artifacts):
self.best = artifacts["best_score"]
self._completed_epoch = artifacts["completed_epoch"]
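# Minimal usage sketch (paths and monitor settings are illustrative):
#
#   checkpointer = CustomCheckpointer(workspace_path="runs/exp1",
#                                     artifacts={"callbacks": {}},
#                                     callbacks=[],
#                                     monitor="val_loss", save_best_only=True)
#   model.fit(x_train, y_train, validation_data=(x_val, y_val),
#             callbacks=[checkpointer])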
| 2.15625 | 2 |
train_test_val.py | arashk7/Yolo5_Dataset_Generator | 0 | 4532 | import os
import shutil
input_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5'
output_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5\ZhitangYolo5'
in_img_dir = os.path.join(input_dir, 'Images')
in_label_dir = os.path.join(input_dir, 'Labels')
out_img_dir = os.path.join(output_dir, 'images')
out_label_dir = os.path.join(output_dir, 'labels')
splits = {'train','test','valid'}
files = os.listdir(in_img_dir)
count = len(files)
for f in files:
print(f)
    src = os.path.join(in_img_dir, f)
    # dst was undefined in the original; copy each listed image into the output images dir
    dst = os.path.join(out_img_dir, f)
    shutil.copyfile(src, dst)
| 2.515625 | 3 |
homeassistant/components/media_player/pjlink.py | dauden1184/home-assistant | 4 | 4533 | """
Support for controlling projector via the PJLink protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pjlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pypjlink2==1.2.0']
_LOGGER = logging.getLogger(__name__)
CONF_ENCODING = 'encoding'
DEFAULT_PORT = 4352
DEFAULT_ENCODING = 'utf-8'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
})
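# Example configuration.yaml entry implied by the schema above (values are
# illustrative):
#
# media_player:
#   - platform: pjlink
#     host: 192.168.1.10
#     name: Office Projector
#     password: !secret pjlink_password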
SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PJLink platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
encoding = config.get(CONF_ENCODING)
password = config.get(CONF_PASSWORD)
if 'pjlink' not in hass.data:
hass.data['pjlink'] = {}
hass_data = hass.data['pjlink']
device_label = "{}:{}".format(host, port)
if device_label in hass_data:
return
device = PjLinkDevice(host, port, name, encoding, password)
hass_data[device_label] = device
add_entities([device], True)
def format_input_source(input_source_name, input_source_number):
"""Format input source for display in UI."""
return "{} {}".format(input_source_name, input_source_number)
class PjLinkDevice(MediaPlayerDevice):
"""Representation of a PJLink device."""
def __init__(self, host, port, name, encoding, password):
"""Iinitialize the PJLink device."""
self._host = host
self._port = port
self._name = name
self._password = password
self._encoding = encoding
self._muted = False
self._pwstate = STATE_OFF
self._current_source = None
with self.projector() as projector:
if not self._name:
self._name = projector.get_name()
inputs = projector.get_inputs()
self._source_name_mapping = \
{format_input_source(*x): x for x in inputs}
self._source_list = sorted(self._source_name_mapping.keys())
def projector(self):
"""Create PJLink Projector instance."""
from pypjlink import Projector
projector = Projector.from_address(
self._host, self._port, self._encoding)
projector.authenticate(self._password)
return projector
def update(self):
"""Get the latest state from the device."""
with self.projector() as projector:
pwstate = projector.get_power()
if pwstate == 'off':
self._pwstate = STATE_OFF
else:
self._pwstate = STATE_ON
self._muted = projector.get_mute()[1]
self._current_source = \
format_input_source(*projector.get_input())
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def is_volume_muted(self):
"""Return boolean indicating mute status."""
return self._muted
@property
def source(self):
"""Return current input source."""
return self._current_source
@property
def source_list(self):
"""Return all available input sources."""
return self._source_list
@property
def supported_features(self):
"""Return projector supported features."""
return SUPPORT_PJLINK
def turn_off(self):
"""Turn projector off."""
with self.projector() as projector:
projector.set_power('off')
def turn_on(self):
"""Turn projector on."""
with self.projector() as projector:
projector.set_power('on')
def mute_volume(self, mute):
"""Mute (true) of unmute (false) media player."""
with self.projector() as projector:
from pypjlink import MUTE_AUDIO
projector.set_mute(MUTE_AUDIO, mute)
def select_source(self, source):
"""Set the input source."""
source = self._source_name_mapping[source]
with self.projector() as projector:
projector.set_input(*source)
| 2.34375 | 2 |
leetcode/regex_matching.py | Kaushalya/algo_journal | 0 | 4534 | <reponame>Kaushalya/algo_journal
# Level: Hard
def isMatch(s: str, p: str) -> bool:
if not p:
return not s
n_s = len(s)
n_p = len(p)
j = 0
i = -1
while i < n_s-1:
i = i+ 1
if j >= n_p:
return False
if p[j] == '*':
while s[i]==s[i-1]:
i += 1
j += 1
if p[j] == '.' or s[i] == p[j]:
j += 1
# continue
elif s[i] != p[j] and j<n_p-1:
j += 2
else:
return False
return True
if __name__ == "__main__":
ss = 'abbbbbc'
p = 'a*'
print(isMatch(ss, p)) | 3.546875 | 4 |
tests/factories.py | luzik/waliki | 324 | 4535 | <reponame>luzik/waliki<filename>tests/factories.py
import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: u'user{0}'.format(n))
password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username)
class Meta:
model = User
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class GroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.user_set.add(user)
class ACLRuleFactory(factory.django.DjangoModelFactory):
class Meta:
model = ACLRule
name = factory.Sequence(lambda n: u'Rule {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def permissions(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for perm in extracted:
if not isinstance(perm, Permission):
perm = Permission.objects.get(content_type__app_label='waliki', codename=perm)
self.permissions.add(perm)
@factory.post_generation
def users(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for user in extracted:
self.users.add(user)
@factory.post_generation
def groups(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
# A list of groups were passed in, use them
for group in extracted:
self.groups.add(group)
class PageFactory(factory.django.DjangoModelFactory):
title = factory.Sequence(lambda n: u'Page {0}'.format(n))
slug = factory.Sequence(lambda n: u'page{0}'.format(n))
@factory.post_generation
def raw(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
self.raw = extracted
class Meta:
model = Page
class RedirectFactory(factory.django.DjangoModelFactory):
old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))
new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))
class Meta:
model = Redirect
| 2.21875 | 2 |
nxt_editor/commands.py | dalteocraft/nxt_editor | 131 | 4536 | # Built-in
import copy
import logging
import time
# External
from Qt.QtWidgets import QUndoCommand
# Internal
from nxt_editor import colors
from nxt_editor import user_dir
from nxt import nxt_path
from nxt.nxt_layer import LAYERS, SAVE_KEY
from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,
list_merger)
from nxt import nxt_io
from nxt import GRID_SIZE
import nxt_editor
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
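# `processing` wraps a command's undo/redo so the model emits processing(True)
# before the wrapped body runs and processing(False) once it returns, letting
# the UI reflect a busy state for the duration of the edit.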
def processing(func):
def wrapper(self):
self.model.processing.emit(True)
func(self)
self.model.processing.emit(False)
return wrapper
class NxtCommand(QUndoCommand):
def __init__(self, model):
super(NxtCommand, self).__init__()
self.model = model
self.model.layer_saved.connect(self.reset_layer_effected)
self._layers_effected_by_me = {}
def _get_effects(self, layer_path):
"""Gets the effected state for a given layer with context to this
command. Since a single command can effect layers in different ways.
:param layer_path: string of layer real path
:return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)
"""
first_eff_by_undo = False
first_eff_by_redo = False
try:
first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']
except KeyError:
pass
try:
first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']
except KeyError:
pass
return first_eff_by_undo, first_eff_by_redo
def reset_layer_effected(self, layer_just_saved):
"""When the model marks a layer as saved we reset the class attr
`_first_effected_by_redo` to False. This makes sure the layer is
properly marked as unsaved even if we undo an action after saving it.
:param layer_just_saved: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)
where_were_at = self.model.undo_stack.index()
cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))
if cur_cmd is self:
return
if layer_just_saved in self._layers_effected_by_me:
if eff_by_undo:
# This command has already been marked as undo effects the
# layer, meaning the layer has been saved and the undo queue
# was moved to an index before this command and the same
# layer was saved again.
eff_by_redo = True
eff_by_undo = False
else:
# Now the undo of this command effects the layer not the redo
eff_by_redo = False
eff_by_undo = True
self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,
'redo': eff_by_redo}
def redo_effected_layer(self, layer_path):
"""Adds layer to the model's set of effected (unsaved) layers. If
this command was the first to effect the layer we mark it as such
by setting the class attr `_first_effected_by_redo` to True.
:param layer_path: string of layer real path
:return: None
"""
layer_unsaved = layer_path in self.model.effected_layers
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
if not eff_by_undo and layer_unsaved:
return
if not eff_by_undo:
self._layers_effected_by_me[layer_path] = {'undo': False,
'redo': True}
self.model.effected_layers.add(layer_path)
else:
# Layer was saved and then undo was called, thus this redo has a
# net zero effect on the layer
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
def undo_effected_layer(self, layer_path):
"""Removes layer from the model's set of effected (unsaved) layers.
If the layer is not marked as effected in the model we mark it as
effected. This case happens when undo is called after a layer is saved.
:param layer_path: string of layer real path
:return: None
"""
eff_by_undo, eff_by_redo = self._get_effects(layer_path)
layer_saved = layer_path not in self.model.effected_layers
if layer_saved:
eff_by_undo = True
# Set redo to False since now its been saved & the undo effects it
eff_by_redo = False
self.model.effected_layers.add(layer_path)
elif eff_by_redo:
try:
self.model.effected_layers.remove(layer_path)
except KeyError: # Removed by a save action
pass
self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,
'redo': eff_by_redo}
class AddNode(NxtCommand):
"""Add a node to the graph"""
def __init__(self, name, data, parent_path, pos, model, layer_path):
super(AddNode, self).__init__(model)
self.name = name
self.data = data
self.parent_path = parent_path
self.layer_path = layer_path
self.stage = model.stage
# command data
self.pos = pos or [0.0, 0.0]
self.prev_selection = self.model.selection
# resulting node
self.node_path = None
self.created_node_paths = []
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
dirty_nodes = []
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
_, dirty = self.stage.delete_node(node, layer,
remove_layer_data=False)
dirty_nodes += dirty
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
comp_layer = self.model.comp_layer
if node is not None:
# delete node
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data)
dirty_nodes += dirty
dirty_nodes += self.created_node_paths
dirty_nodes += [self.node_path]
self.undo_effected_layer(self.layer_path)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = self.prev_selection
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.created_node_paths = []
dirty_nodes = []
nodes, dirty = self.stage.add_node(name=self.name, data=self.data,
parent=self.parent_path,
layer=layer.layer_idx(),
comp_layer=self.model.comp_layer)
dirty_nodes += dirty
self.node_path = layer.get_node_path(nodes[0])
self.model._set_node_pos(node_path=self.node_path, pos=self.pos,
layer=layer)
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.model.selection = [self.node_path]
self.redo_effected_layer(layer.real_path)
self.setText('Added node: {}'.format(self.node_path))
class DeleteNode(NxtCommand):
def __init__(self, node_path, model, layer_path, other_removed_nodes):
"""Delete node from the layer at the layer path and the comp layer.
It is important to note that the other_removed_nodes
list must be shared by other DeleteNode commands in a command macro.
The list will be mutated by the stage as it deletes node, this
behavior is depended upon!
:param node_path: String of node path
:param model: StageModel
:param layer_path: String of layer realpath
:param other_removed_nodes: list of node paths that will be deleted
in this event loop.
"""
super(DeleteNode, self).__init__(model)
self.layer_path = layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
self.prev_starts = []
self.prev_breaks = {}
self.node_path = node_path
self.node_data = {}
self.others = other_removed_nodes
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
parent = self.node_data['parent']
# We don't want to fix names because we know this node should be
# named what it was named when it was deleted
new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],
data=self.node_data['save_dict'],
parent=parent,
layer=layer.layer_idx(),
comp_layer=comp_layer,
fix_names=False)
if self.node_data['break']:
self.model._add_breakpoint(self.node_path, layer)
self.model._add_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._add_start_node(self.node_path, layer)
# restore layer data
pos = self.node_data.get('pos')
if pos:
self.model.top_layer.positions[self.node_path] = pos
# This might be a bug? We don't touch the top layer in redo...
self.undo_effected_layer(self.stage.top_layer.real_path)
attr_display = self.node_data.get('attr_display')
if attr_display is not None:
self.model._set_attr_display_state(self.node_path, attr_display)
user_dir.breakpoints = self.prev_breaks
ancestor_tuple = self.node_data.get('ancestor_child_order')
if ancestor_tuple:
ancestor_path, ancestor_child_order = ancestor_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,
ancestor_child_order)
self.model.selection = self.prev_selection
# Fixme: Does not account for rebuilding proxy nodes for the dirty nodes
dirty_set = tuple(set(dirty))
self.undo_effected_layer(self.layer_path)
if dirty_set != (self.node_path,):
self.model.update_comp_layer(rebuild=True)
else:
self.model.nodes_changed.emit(dirty_set)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
comp_layer = self.model.comp_layer
self.node_data = {}
self.prev_starts = self.model.get_start_nodes(layer)
self.prev_breaks = user_dir.breakpoints
dirty_nodes = []
node = layer.lookup(self.node_path)
# get node info
parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
name = getattr(node, INTERNAL_ATTRS.NAME)
is_break = self.model.get_is_node_breakpoint(self.node_path, layer)
self.node_data = {'parent': parent, 'name': name,
'pos': self.model.get_node_pos(self.node_path),
'break': is_break}
closest_ancestor = layer.ancestors(self.node_path)
if closest_ancestor:
closest_ancestor = closest_ancestor[0]
else:
closest_ancestor = None
closest_ancestor_path = layer.get_node_path(closest_ancestor)
if closest_ancestor_path:
ancestor_child_order = getattr(closest_ancestor,
INTERNAL_ATTRS.CHILD_ORDER)
self.node_data['ancestor_child_order'] = (closest_ancestor_path,
ancestor_child_order[:])
# Attr display data
attr_display = self.model.get_attr_display_state(self.node_path)
if attr_display is not None:
self.node_data['attr_display'] = attr_display
# get layer data
is_start = self.model.get_is_node_start(self.node_path, layer)
self.node_data['start'] = is_start
self.node_data['save_dict'] = get_node_as_dict(node)
if self.node_data['break']:
self.model._remove_breakpoint(self.node_path, layer)
self.model._remove_breakpoint(self.node_path, self.stage.top_layer)
if self.node_data['start']:
self.model._remove_start_node(self.node_path, layer)
node = layer.lookup(self.node_path)
source_layer = self.stage.get_node_source_layer(node)
if source_layer.layer_idx() > 0:
rm_layer_data = True
else:
rm_layer_data = False
for p in self.others[:]:
self.others += comp_layer.get_node_dirties(p)
_, dirty = self.stage.delete_node(node, layer,
comp_layer=comp_layer,
remove_layer_data=rm_layer_data,
other_removed_nodes=self.others)
dirty_nodes += dirty + [self.node_path]
if self.node_path in self.model.selection:
fix_selection = self.model.selection[:]
fix_selection.remove(self.node_path)
self.model.selection = fix_selection
self.model.nodes_changed.emit(tuple(set(dirty_nodes)))
self.redo_effected_layer(layer.real_path)
self.setText("Delete node: {}".format(self.node_path))
class SetNodeAttributeData(NxtCommand):
"""Set attribute value"""
def __init__(self, node_path, attr_name, data, model, layer_path):
super(SetNodeAttributeData, self).__init__(model)
self.node_path = node_path
self.nice_attr_name = attr_name
self.attr_name = attr_name
self.data = data
self.stage = model.stage
self.layer_path = layer_path
self.created_node_paths = []
self.remove_attr = False
self.prev_data = {}
self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP
self.return_value = None
self.prev_selection = model.selection
@processing
def undo(self):
start = time.time()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
comp = self.model.comp_layer
dirties = [self.node_path]
# delete any created nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=layer, comp_layer=comp,
remove_layer_data=False)
n = layer.lookup(self.node_path)
if n is not None:
if self.remove_attr:
self.stage.delete_node_attr(n, self.attr_name)
dirties += comp.get_node_dirties(self.node_path)
else:
result = self.stage.node_setattr_data(node=n,
attr=self.attr_name,
layer=layer, create=False,
comp_layer=comp,
**self.prev_data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += result
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
self.model.attrs_changed.emit(changed_attrs)
if not self.recomp:
changed = tuple([self.node_path] + self.created_node_paths)
self.model.nodes_changed.emit(changed)
self.model.selection = self.prev_selection
# undo_debug(self, start)
@processing
def redo(self):
start = time.time()
created_node = False
self.prev_selection = self.model.selection
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
comp = self.model.comp_layer
self.remove_attr = False
self.created_node_paths = []
# get the node
node = layer.lookup(self.node_path)
dirties = [self.node_path]
if node is None:
parent_path = nxt_path.get_parent_path(self.node_path)
name = nxt_path.node_name_from_node_path(self.node_path)
if self.attr_name in INTERNAL_ATTRS.ALL:
self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)
attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)}
else:
attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}}
self.return_value = self.attr_name
_, dirties = self.stage.add_node(name=name, data=attr_data,
parent=parent_path,
layer=layer.layer_idx(),
comp_layer=comp,
fix_names=False)
# Fixme: Targeted parenting would avoid the need for a recomp
if layer.descendants(self.node_path):
self.recomp = True
created_node = True
self.created_node_paths += [self.node_path]
node = layer.lookup(self.node_path)
self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,
layer, quiet=True)
if self.prev_data:
self.prev_data = copy.deepcopy(self.prev_data)
# set attribute value this also adds the attribute if it does not exist
if not self.stage.node_attr_exists(node, self.attr_name):
self.remove_attr = True
if not created_node:
self.return_value = self.stage.node_setattr_data(node,
self.attr_name,
layer=layer,
create=True,
comp_layer=comp,
**self.data)
if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:
dirties += self.return_value
if self.attr_name in INTERNAL_ATTRS.ALL:
dirties += comp.get_node_dirties(self.node_path)
if self.recomp:
self.model.update_comp_layer(rebuild=self.recomp)
else:
if (self.remove_attr or self.created_node_paths or
self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,
INTERNAL_ATTRS.PARENT_PATH)):
self.model.nodes_changed.emit(dirties)
else:
changed_attrs = ()
for dirty in dirties:
attr_path = nxt_path.make_attr_path(dirty, self.attr_name)
changed_attrs += (attr_path,)
self.model.attrs_changed.emit(changed_attrs)
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
val = str(self.data.get(META_ATTRS.VALUE))
self.setText("Set {} to {}".format(attr_path, val))
# redo_debug(self, start)
class SetNodeAttributeValue(SetNodeAttributeData):
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data,
model, layer_path)
class RenameNode(SetNodeAttributeValue):
"""Rename node"""
def __init__(self, node_path, name, model, layer_path):
self.old_node_path = node_path
layer = model.lookup_layer(layer_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(name=name, layer=layer,
parent_path=parent_path,
layer_only=True)
super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,
new_name, model, layer_path)
def undo(self):
self.model.about_to_rename.emit()
self.prev_data['force'] = True
super(RenameNode, self).undo()
self.node_path = self.old_node_path
self.model.selection = [self.node_path]
def redo(self):
self.model.about_to_rename.emit()
super(RenameNode, self).redo()
self.node_path = self.return_value
self.model.selection = [self.node_path]
if self.model.get_is_node_start(self.node_path, self.model.comp_layer):
self.model.starts_changed.emit(self.model.get_start_nodes())
self.setText("{} renamed to {}".format(self.old_node_path,
self.return_value))
class DuplicateNodes(NxtCommand):
"""Duplicate nodes on this graph"""
def __init__(self, node_paths, descendants, model, source_layer_path,
target_layer_path):
# TODO: We should make another base command class that can be used to
# set multiple attr's data. That way duplicate can just be a
# setattr. The way it works now we can only set one attr's data at a
# time and duplicate needs to get local + INTERNAL number of attrs.
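        # A hypothetical sketch of such a command (illustrative only, not an
        # existing API): SetNodeAttributesData(node_path, {attr: data, ...},
        # model, layer_path)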
super(DuplicateNodes, self).__init__(model)
self.node_paths = node_paths
self.descendants = descendants
self.source_layer_path = source_layer_path
self.target_layer_path = target_layer_path
self.stage = model.stage
# get undo data
self.prev_selection = self.model.selection
# resulting nodes
self.new_node_paths = []
@processing
def undo(self):
target_layer = self.model.lookup_layer(self.target_layer_path)
# delete duplicated nodes
for node_path in self.new_node_paths:
n = target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, target_layer,
remove_layer_data=True)
self.model.selection = self.prev_selection
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(target_layer.real_path)
@processing
def redo(self):
new_selection = []
self.new_node_paths = []
source_layer = self.model.lookup_layer(self.source_layer_path)
target_layer = self.model.lookup_layer(self.target_layer_path)
self.redo_effected_layer(target_layer.real_path)
for node_path in self.node_paths:
node = source_layer.lookup(node_path)
# duplicate node
new, dirty = self.stage.duplicate_node(node=node,
layer=target_layer,
descendants=self.descendants)
new_selection.append(target_layer.get_node_path(new[0]))
# process new nodes
for new_node in new:
# add new node path to the list and emit model signal
new_node_path = target_layer.get_node_path(new_node)
self.new_node_paths += [new_node_path]
# self.model.node_added.emit(new_node_path)
# set position
has_parent = self.model.node_has_parent(new_node_path,
target_layer)
if not has_parent and new_node_path != node_path:
pos = self.model.get_node_pos(node_path)
pos = [pos[0] + 20, pos[1] + 20]
self.model._set_node_pos(new_node_path, pos,
layer=target_layer)
self.model.selection = new_selection
self.model.update_comp_layer(rebuild=True)
if len(self.node_paths) == 1:
nodes_str = self.node_paths[0]
else:
nodes_str = 'nodes'
self.setText('Duplicated {}'.format(nodes_str))
class InstanceNode(SetNodeAttributeValue):
"""Instance nodes on this graph"""
def __init__(self, node_path, model, source_layer_path, target_layer_path):
src_name = nxt_path.node_name_from_node_path(node_path)
parent_path = nxt_path.get_parent_path(node_path)
new_name = model.stage.get_unique_node_name(src_name,
model.comp_layer,
parent_path=parent_path)
new_path = nxt_path.join_node_paths(parent_path, new_name)
self.new_path = new_path
super(InstanceNode, self).__init__(new_path,
INTERNAL_ATTRS.INSTANCE_PATH,
node_path, model, target_layer_path)
def redo(self):
node_path = self.data.get(META_ATTRS.VALUE)
layer = self.model.lookup_layer(self.layer_path)
new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),
layer)
self.model._set_node_pos(self.new_path, new_pos, layer)
super(InstanceNode, self).redo()
self.return_value = self.new_path
self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))
class SetNodesPosition(NxtCommand):
"""Move nodes"""
def __init__(self, node_positions, model, layer_path):
super(SetNodesPosition, self).__init__(model)
self.model = model
self.layer_path = layer_path
self.new_positions = node_positions
self.old_positions = {}
for path in self.new_positions.keys():
self.old_positions[path] = model.get_node_pos(path)
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
for node_path, old_pos in self.old_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=old_pos, layer=layer)
self.undo_effected_layer(self.layer_path)
@processing
def redo(self):
delta_str = None
layer = self.model.lookup_layer(self.layer_path)
for node_path, new_pos in self.new_positions.items():
self.model._set_node_pos(node_path=node_path,
pos=new_pos, layer=layer)
if not delta_str:
pos = new_pos
prev_pos = self.old_positions[node_path]
# Only letting it set text once, relying on consistent delta.
x_delta = pos[0] - prev_pos[0]
y_delta = pos[1] - prev_pos[1]
delta_str = '{}, {}'.format(x_delta, y_delta)
if len(self.new_positions) == 1:
nodes_str = node_path
else:
nodes_str = 'nodes'
self.setText('Move {} {}'.format(nodes_str, delta_str))
self.redo_effected_layer(layer.real_path)
class SetSelection(QUndoCommand):
"""Select Nodes and Connections"""
def __init__(self, paths, model):
super(SetSelection, self).__init__()
self.new_paths = paths
self.model = model
self.prev_paths = self.model.selection
def undo(self):
self.model.selection = self.prev_paths
def redo(self):
self.model.selection = self.new_paths
self.setText('Set selection: {}'.format(str(self.new_paths)))
class AddSelection(SetSelection):
def __init__(self, paths, model):
self.added_paths = paths
curr_selection = model.selection
new_paths = curr_selection + paths
super(AddSelection, self).__init__(new_paths, model)
def redo(self):
super(AddSelection, self).redo()
self.setText('Add {} to selection'.format(self.added_paths))
class RemoveFromSelection(SetSelection):
def __init__(self, paths, model):
self.rem_paths = paths
new_selection = model.selection[:]
for path in paths:
try:
new_selection.remove(path)
except ValueError:
continue
super(RemoveFromSelection, self).__init__(new_selection, model)
def redo(self):
super(RemoveFromSelection, self).redo()
self.setText('Remove {} from selection'.format(self.rem_paths))
class LocalizeNodes(NxtCommand):
"""Localize nodes"""
def __init__(self, node_paths, model):
super(LocalizeNodes, self).__init__(model)
self.node_paths = node_paths
self.model = model
self.stage = model.stage
self.prev_selection = self.model.selection
self.prev_node_data = {}
self.created_node_paths = []
@processing
def undo(self):
for node_path in self.created_node_paths:
n = self.model.target_layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer=self.model.target_layer,
remove_layer_data=False)
layers = [self.model.target_layer]
for node_path, all_data in self.prev_node_data.items():
apply_data = {}
node = self.model.target_layer.lookup(node_path)
if not node:
continue
data = all_data['data']
child_order = all_data['data'].get('child_order', [])
apply_data['child_order'] = child_order
apply_data['attributes'] = data.get('attributes', {})
attrs_to_keep = apply_data['attributes'].keys()
apply_data['enabled'] = data.get('enabled')
if data.get('instance'):
apply_data['instance'] = data['instance']
self.stage.transfer_node_data(node, self.model.target_layer,
apply_data, self.model.comp_layer)
local_attrs = self.stage.get_node_local_attr_names(node_path,
layers)
for attr in local_attrs:
if attr not in attrs_to_keep:
self.stage.delete_node_attr(node=node, attr_name=attr)
self.model.update_comp_layer(rebuild=True)
self.undo_effected_layer(layers[0].real_path)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.created_node_paths = []
layer = self.model.target_layer
for node_path in self.node_paths:
node_data = {}
display_node = self.model.comp_layer.lookup(node_path)
if not display_node:
continue
# add node if it doesn't exist on the target layer
target_node = self.model.target_layer.lookup(node_path)
if not target_node:
new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,
self.model,
layer)
target_node = new_nodes[-1]
self.created_node_paths += new_paths
# self.model.node_added.emit(node_path)
# preserve original data
node_data['data'] = get_node_as_dict(target_node)
# localize source node
self.stage.transfer_node_data(target_node, self.model.target_layer,
display_node,
self.model.comp_layer)
self.prev_node_data[node_path] = node_data
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.redo_effected_layer(layer.real_path)
self.model.selection = self.prev_selection
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText('Localize {}'.format(str(path_str)))
class LocalizeUserAttr(SetNodeAttributeData):
"""Localize nodes"""
def __init__(self, node_path, attr_name, model, layer_path):
node = model.comp_layer.lookup(node_path)
data = model.stage.get_node_attr_data(node, attr_name,
model.comp_layer)
if META_ATTRS.SOURCE in data:
data.pop(META_ATTRS.SOURCE)
super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
model, layer_path)
class LocalizeCompute(SetNodeAttributeValue):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path):
comp_layer = model.comp_layer
display_node = comp_layer.lookup(node_path)
code_lines = model.stage.get_node_code_lines(display_node, comp_layer)
super(LocalizeCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(LocalizeCompute, self).redo()
self.setText("Localize compute on {}".format(self.node_path))
class LocalizeInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
inst_path = model.get_node_instance_path(node_path, model.comp_layer,
expand=False)
super(LocalizeInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
inst_path, model, layer_path)
def redo(self):
super(LocalizeInstancePath, self).redo()
self.setText("Localize instance path to {}".format(self.node_path))
class RevertInstancePath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertInstancePath, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
None, model, layer_path)
def redo(self):
super(RevertInstancePath, self).redo()
self.setText("Revert instance path on {}".format(self.node_path))
class LocalizeExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
exec_path = model.get_node_exec_in(node_path)
super(LocalizeExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_path, model, layer_path)
def redo(self):
super(LocalizeExecPath, self).redo()
self.setText("Localize exec input on {}".format(self.node_path))
class RevertExecPath(SetNodeAttributeValue):
def __init__(self, node_path, model, layer_path):
super(RevertExecPath, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN, None,
model, layer_path)
    def redo(self):
        super(RevertExecPath, self).redo()
        self.setText("Revert exec input on {}".format(self.node_path))
class RevertNode(DeleteNode):
"""Localize nodes"""
def __init__(self, node_path, model, layer_path, others):
super(RevertNode, self).__init__(node_path, model, layer_path, others)
self.rebuild = False # Tells the delete command not to re-comp
self.created_node_paths = []
self.node_path = node_path
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
# Remove our created empty nodes
for node_path in self.created_node_paths:
n = layer.lookup(node_path)
if n is not None:
self.stage.delete_node(n, layer, remove_layer_data=False)
super(RevertNode, self).undo()
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
def redo(self):
self.created_node_paths = []
super(RevertNode, self).redo()
layer = self.model.lookup_layer(self.layer_path)
# Re-create the node as an empty node
new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,
self.model, layer)
self.created_node_paths += new_paths
self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
self.model.selection = self.prev_selection
self.setText('Revert {}'.format(self.node_path))
class ParentNodes(NxtCommand):
"""Parent Nodes"""
def __init__(self, node_paths, parent_node_path, model):
super(ParentNodes, self).__init__(model)
self.parent_node_path = parent_node_path
self.parent_node = None
self.model = model
self.stage = model.stage
self.node_paths = node_paths
# resulting nodes
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
# get node selection for undo
self.prev_selection = self.model.selection
# get previous node data for all child nodes for undo
self.prev_node_data = {}
@processing
def undo(self):
layer = self.model.target_layer
self.undo_effected_layer(layer.real_path)
# undo parent
common_parent_nodes = {}
for old_path, node_data in self.prev_node_data.items():
prev_parent_path = node_data['parent']
prev_parent_node = layer.lookup(prev_parent_path)
new_path = self.node_path_data[old_path]
node = layer.lookup(new_path)
if prev_parent_path not in list(common_parent_nodes.keys()):
common_parent_nodes[prev_parent_path] = {node: old_path}
else:
common_parent_nodes[prev_parent_path][node] = old_path
child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)
if child_order_tuple:
ancestor_path, child_order = child_order_tuple
ancestor = layer.lookup(ancestor_path)
if ancestor:
self.stage.set_node_child_order(ancestor, child_order,
layer)
if new_path in list(self.model.top_layer.positions.keys()):
source_layer = self.stage.get_node_source_layer(node)
source_layer.positions.pop(new_path)
for parent_path, nodes_dict in common_parent_nodes.items():
self.stage.parent_nodes(nodes=list(nodes_dict.keys()),
parent_path=parent_path,
layer=layer)
for parent_path, nodes_dict in common_parent_nodes.items():
for node, old_path in nodes_dict.items():
node_data = self.prev_node_data[old_path]
# restore name
prev_name = node_data['name']
name = getattr(node, INTERNAL_ATTRS.NAME)
if name != prev_name:
self.stage.set_node_name(node, name=prev_name,
layer=layer, force=True)
# restore position
if self.parent_node_path != nxt_path.WORLD:
prev_pos = node_data['pos']
source_layer = self.stage.get_node_source_layer(node)
self.model._set_node_pos(old_path, prev_pos,
layer=source_layer)
# delete any created nodes
for node_path in self.created_node_paths:
node = layer.lookup(node_path)
if node is not None:
self.stage.delete_node(node, layer)
idx = 0
for old_node_path in self.node_paths:
new_node_path = self.new_node_paths[idx]
attr_state = self.model.remove_attr_display_state(new_node_path)
if attr_state is not None:
self.model._set_attr_display_state(old_node_path, attr_state)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = self.prev_selection
@processing
def redo(self):
self.prev_node_data = {}
self.node_path_data = {}
self.new_node_paths = []
self.created_node_paths = []
nodes = []
layer = self.model.target_layer
self.redo_effected_layer(layer.real_path)
for node_path in self.node_paths:
node = layer.lookup(node_path)
name = getattr(node, INTERNAL_ATTRS.NAME)
parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)
            node_data = self.stage.get_node_data(node, layer)
node_data['pos'] = self.model.get_node_pos(node_path)
node_data['name'] = name
node_data['parent'] = parent_path
parent_node = layer.lookup(parent_path)
ancestor_path = parent_path
child_order = []
if parent_node:
child_order = getattr(parent_node,
INTERNAL_ATTRS.CHILD_ORDER)
else:
ancestors = layer.ancestors(node_path)
if ancestors:
ancestor = ancestors[0]
ancestor_path = layer.get_node_path(ancestor)
child_order = self.stage.get_node_child_order(ancestor)
node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
child_order]
self.prev_node_data[node_path] = node_data
nodes += [node]
        # Get current node hierarchy information for each node. Each node
        # path is placed in a list of descendants for each top node so that
        # when nodes are un-parented each one can be placed visually beside
        # its original top node.
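        # Illustrative shape of the mapping built below (hypothetical paths):
        #   {'/top_a': [<node /top_a/child>, ...], '/top_b': [<node /top_b>]}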
node_hierarchy_data = {}
        if self.parent_node_path == nxt_path.WORLD:
for node_path in self.node_paths:
node = layer.lookup(node_path)
top_node = self.stage.get_top_node(node,
self.model.target_layer)
if top_node is None:
top_node = node
top_node_path = layer.get_node_path(top_node)
                top_node_descendant_list = node_hierarchy_data.get(top_node_path, [])
top_node_descendant_list += [node]
node_hierarchy_data[top_node_path] = top_node_descendant_list
if not node_hierarchy_data:
return
# parent
self.node_path_data = self.stage.parent_nodes(nodes,
self.parent_node_path,
layer)
self.new_node_paths = list(self.node_path_data.values())
idx = 0
for new_node_path in self.new_node_paths:
old_node_path = self.node_paths[idx]
attr_state = self.model.remove_attr_display_state(old_node_path)
if attr_state is not None:
self.model._set_attr_display_state(new_node_path, attr_state)
# set position for un-parent
if self.parent_node_path == nxt_path.WORLD:
old_root = nxt_path.get_root_path(old_node_path)
new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14,
GRID_SIZE),
self.model.top_layer)
self.model._set_node_pos(new_node_path, new_pos, layer)
idx += 1
self.model.update_comp_layer(rebuild=True)
self.model.selection = list(self.node_path_data.values())
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
self.setText("Parent {} to {}".format(path_str, self.parent_node_path))
class AddAttribute(SetNodeAttributeData):
"""Add an attribute to a node."""
def __init__(self, node_path, attr_name, value, model, layer_path):
data = {META_ATTRS.VALUE: value}
super(AddAttribute, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(AddAttribute, self).redo()
self.remove_attr = True
self.setText("Add {} attr to {}".format(self.attr_name,
self.node_path))
class DeleteAttribute(AddAttribute):
"""Delete attribute on a node"""
def __init__(self, node_path, attr_name, model, layer_path):
super(DeleteAttribute, self).__init__(node_path, attr_name, None,
model, layer_path)
# Get the data to be set if undo is called
layer = self.model.lookup_layer(self.layer_path)
node = layer.lookup(self.node_path)
self.data = self.stage.get_node_attr_data(node, self.attr_name, layer)
def undo(self):
super(DeleteAttribute, self).redo()
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
def redo(self):
        # Override remove_attr here to ensure the attr is deleted
self.remove_attr = True
super(DeleteAttribute, self).undo()
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.setText("Remove {} attr from {}".format(self.attr_name,
self.node_path))
class RevertCompute(SetNodeAttributeValue):
"""Revert compute"""
def __init__(self, node_path, model, layer_path):
super(RevertCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE, [], model,
layer_path)
def redo(self):
super(RevertCompute, self).redo()
self.setText("Revert compute on {}".format(self.node_path))
class RenameAttribute(NxtCommand):
"""Rename attribute"""
def __init__(self, node_path, attr_name, new_attr_name, model, layer_path):
super(RenameAttribute, self).__init__(model)
self.node_path = node_path
self.attr_name = attr_name
self.new_attr_name = new_attr_name
self.model = model
self.stage = model.stage
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.new_attr_name, self.attr_name)
self.undo_effected_layer(layer.real_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.rename_attribute(layer, self.attr_name, self.new_attr_name)
self.redo_effected_layer(layer.real_path)
def rename_attribute(self, layer, attr_name, new_attr_name):
node = layer.lookup(self.node_path)
self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)
self.model.update_comp_layer()
old_name = nxt_path.make_attr_path(self.node_path, attr_name)
new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)
self.setText("Rename {} to {}".format(old_name, new_name))
class SetAttributeComment(SetNodeAttributeData):
"""Set attribute comment"""
def __init__(self, node_path, attr_name, comment, model, layer_path):
data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}
super(SetAttributeComment, self).__init__(node_path, attr_name, data,
model, layer_path)
def redo(self):
super(SetAttributeComment, self).redo()
attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)
self.setText("Changed comment on {}".format(attr_path))
class SetCompute(SetNodeAttributeValue):
"""Set node code value"""
def __init__(self, node_path, code_lines, model, layer_path):
super(SetCompute, self).__init__(node_path,
INTERNAL_ATTRS.COMPUTE,
code_lines, model, layer_path)
def redo(self):
super(SetCompute, self).redo()
self.setText("Changed compute on {}".format(self.node_path))
class SetNodeComment(SetNodeAttributeValue):
"""Set node comment"""
def __init__(self, node_path, comment, model, layer_path):
super(SetNodeComment, self).__init__(node_path,
INTERNAL_ATTRS.COMMENT,
comment, model, layer_path)
def redo(self):
super(SetNodeComment, self).redo()
self.setText("Changed comment on {}".format(self.node_path))
class SetNodeInstance(SetNodeAttributeValue):
"""Set node instance"""
def __init__(self, node_path, instance_path, model, layer_path):
super(SetNodeInstance, self).__init__(node_path,
INTERNAL_ATTRS.INSTANCE_PATH,
instance_path, model, layer_path)
def redo(self):
super(SetNodeInstance, self).redo()
txt = ("Set inst path on "
"{} to {}".format(self.node_path,
self.data.get(META_ATTRS.VALUE)))
self.setText(txt)
class SetNodeEnabledState(SetNodeAttributeValue):
"""Set node enabled state"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeEnabledState, self).__init__(node_path,
INTERNAL_ATTRS.ENABLED,
value, model, layer_path)
def redo(self):
super(SetNodeEnabledState, self).redo()
if self.data.get(META_ATTRS.VALUE):
self.setText("Enabled {}".format(self.node_path))
else:
self.setText("Disabled {}".format(self.node_path))
class SetNodeCollapse(NxtCommand):
"""Set the node collapse state"""
def __init__(self, node_paths, value,
model, layer_path):
super(SetNodeCollapse, self).__init__(model)
self.node_paths = node_paths
self.value = value
self.model = model
self.stage = model.stage
self.layer_path = layer_path
self.prev_values = {}
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
self.undo_effected_layer(layer.real_path)
for node_path, prev_value in self.prev_values.items():
layer.collapse[node_path] = prev_value
self.model.comp_layer.collapse[node_path] = prev_value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
self.redo_effected_layer(layer.real_path)
self.prev_values = {}
for np in self.node_paths:
self.prev_values[np] = self.model.get_node_collapse(np, layer)
for node_path in self.node_paths:
layer.collapse[node_path] = self.value
self.model.comp_layer.collapse[node_path] = self.value
self.model.collapse_changed.emit(list(self.prev_values.keys()))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Collapsed {}".format(path_str))
else:
self.setText("Expanded {}".format(path_str))
class SetNodeExecuteSources(SetNodeAttributeValue):
"""Set node execute sources"""
def __init__(self, node_path, exec_source, model, layer_path):
super(SetNodeExecuteSources, self).__init__(node_path,
INTERNAL_ATTRS.EXECUTE_IN,
exec_source, model,
layer_path)
def redo(self):
super(SetNodeExecuteSources, self).redo()
val = self.data.get(META_ATTRS.VALUE)
if val is None:
self.setText("Removed exec input for {}".format(self.node_path))
return
self.setText("Set {} exec input to {}".format(self.node_path, val))
class SetNodeBreakPoint(QUndoCommand):
"""Set node as a break point"""
def __init__(self, node_paths, value, model, layer_path):
super(SetNodeBreakPoint, self).__init__()
self.node_paths = node_paths
self.value = value
self.model = model
self.layer_path = layer_path
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if not self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if self.value:
func = self.model._add_breakpoint
else:
func = self.model._remove_breakpoint
for node_path in self.node_paths:
func(node_path, layer)
self.model.nodes_changed.emit(tuple(self.node_paths))
if len(self.node_paths) == 1:
path_str = self.node_paths[0]
else:
path_str = str(self.node_paths)
if self.value:
self.setText("Add breakpoint to {}".format(path_str))
else:
self.setText("Remove breakpoint from {}".format(path_str))
class ClearBreakpoints(QUndoCommand):
"""Clear all the breakpoints for a given layer"""
def __init__(self, model, layer_path):
super(ClearBreakpoints, self).__init__()
self.model = model
self.layer_path = layer_path
self.prev_breaks = []
@processing
def undo(self):
user_dir.breakpoints[self.layer_path] = self.prev_breaks
self.model.nodes_changed.emit(tuple(self.prev_breaks))
@processing
def redo(self):
self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])
if self.layer_path in list(user_dir.breakpoints.keys()):
user_dir.breakpoints.pop(self.layer_path)
self.model.nodes_changed.emit(tuple(self.prev_breaks))
self.setText("Clear all breakpoints")
class SetNodeStartPoint(SetNodeAttributeValue):
"""Set this node as the execution start point"""
def __init__(self, node_path, value, model, layer_path):
super(SetNodeStartPoint, self).__init__(node_path,
INTERNAL_ATTRS.START_POINT,
value, model, layer_path)
class SetNodeChildOrder(SetNodeAttributeValue):
"""Set node child order"""
def __init__(self, node_path, child_order, model, layer_path):
super(SetNodeChildOrder, self).__init__(node_path,
INTERNAL_ATTRS.CHILD_ORDER,
child_order, model, layer_path)
def redo(self):
super(SetNodeChildOrder, self).redo()
self.setText("Change child order on {}".format(self.node_path))
class SetLayerAlias(NxtCommand):
"""Set Layer Alias"""
def __init__(self, alias, layer_path, model):
super(SetLayerAlias, self).__init__(model)
self.layer_path = layer_path
self.alias = alias
self.old_alias = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.set_alias(self.old_alias)
else:
layer.set_alias_over(self.old_alias)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_alias = layer.get_alias(local=True)
layer.set_alias(self.alias)
else:
self.old_alias = layer.get_alias(fallback_to_local=False)
layer.set_alias_over(self.alias)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_alias_changed.emit(self.layer_path)
self.setText("Set {} alias to {}".format(layer.filepath, self.alias))
class NewLayer(NxtCommand):
"""Add new layer"""
def __init__(self, file_path, file_name, idx, model, chdir):
super(NewLayer, self).__init__(model)
self.new_layer_path = None
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.file_name = file_name
self.chdir = chdir
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.new_layer_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(LAYERS.TOP)
self.undo_effected_layer(self.new_layer_path)
self.model.layer_removed.emit(self.new_layer_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]
open_layer_colors = []
for layer in self.stage._sub_layers:
color = layer.color
if color:
color = color.lower()
open_layer_colors += [color]
layer_color = layer_color_index[0]
for c in layer_color_index:
if c not in open_layer_colors:
layer_color = c
break
real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir)
layer_data = {"parent_layer": parent_layer,
SAVE_KEY.FILEPATH: self.file_path,
SAVE_KEY.REAL_PATH: real_path,
SAVE_KEY.COLOR: layer_color,
SAVE_KEY.ALIAS: self.file_name
}
new_layer = self.stage.new_sublayer(layer_data=layer_data,
idx=self.insert_idx)
self.new_layer_path = new_layer.real_path
self.redo_effected_layer(new_layer.real_path)
# Fixme: The next 2 lines each build once
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.new_layer_path)
self.model.layer_added.emit(self.new_layer_path)
self.setText("New layer {}".format(self.new_layer_path))
class ReferenceLayer(NxtCommand):
"""Refernce existing layer"""
def __init__(self, file_path, idx, model, chdir):
super(ReferenceLayer, self).__init__(model)
self.model = model
self.stage = model.stage
self.insert_idx = idx
self.file_path = file_path
self.real_path = nxt_path.full_file_expand(self.file_path, chdir)
@processing
def undo(self):
new_layer = self.model.lookup_layer(self.real_path)
if new_layer in self.stage._sub_layers:
self.undo_effected_layer(new_layer.parent_layer.real_path)
self.stage.remove_sublayer(new_layer)
self.model.set_target_layer(LAYERS.TOP)
self.model.update_comp_layer(rebuild=True)
self.model.layer_removed.emit(self.real_path)
@processing
def redo(self):
sub_layer_count = len(self.stage._sub_layers)
if 0 < self.insert_idx <= sub_layer_count:
parent_layer = self.stage._sub_layers[self.insert_idx - 1]
self.redo_effected_layer(parent_layer.real_path)
else:
parent_layer = None
layer_data = nxt_io.load_file_data(self.real_path)
extra_data = {"parent_layer": parent_layer,
"filepath": self.file_path,
"real_path": self.real_path,
"alias": layer_data['name']
}
layer_data.update(extra_data)
self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)
# Fixme: The next 2 lines each build once
self.model.update_comp_layer(rebuild=True)
self.model.set_target_layer(self.real_path)
self.model.layer_added.emit(self.real_path)
self.setText("Added reference to {}".format(self.real_path))
class RemoveLayer(ReferenceLayer):
"""Remove existing layer"""
def __init__(self, layer_path, model):
idx = model.lookup_layer(layer_path).layer_idx()
super(RemoveLayer, self).__init__(layer_path, idx, model, None)
self.text = "Removed reference to {}".format(layer_path)
@processing
def undo(self):
super(RemoveLayer, self).redo()
self.setText(self.text)
@processing
def redo(self):
super(RemoveLayer, self).undo()
self.setText(self.text)
class MuteToggleLayer(NxtCommand):
"""Toggles muting an existing layer"""
def __init__(self, layer_path, model):
super(MuteToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_muted(local=True)
layer.set_muted(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_muted(local=False)
self.model.top_layer.set_mute_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_mute_changed.emit((self.layer_path,))
self.setText("Toggle {} muted.".format(layer.get_alias()))
class SoloToggleLayer(NxtCommand):
"""Toggles soloing an existing layer"""
def __init__(self, layer_path, model):
super(SoloToggleLayer, self).__init__(model)
self.layer_path = layer_path
self.model = model
self.layer_paths = []
def undo(self):
self.toggle_state()
for layer_path in self.layer_paths:
self.undo_effected_layer(layer_path)
def redo(self):
self.layer_paths = []
self.toggle_state()
for layer_path in self.layer_paths:
self.redo_effected_layer(layer_path)
@processing
def toggle_state(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
state = not layer.get_soloed(local=True)
layer.set_soloed(state)
self.layer_paths.append(layer.real_path)
else:
state = not layer.get_soloed(local=False)
self.model.top_layer.set_solo_over(layer.filepath, state)
self.layer_paths.append(self.model.top_layer.real_path)
self.model.update_comp_layer(rebuild=True)
self.model.layer_solo_changed.emit((self.layer_path,))
self.setText("Toggle {} soloed.".format(layer.get_alias()))
class SetLayerColor(NxtCommand):
def __init__(self, color, layer_path, model):
"""Sets the color for a given layer, if the layer is not a top layer
the top layer store an overrides.
:param color: string of new layer alias (name)
:param layer_path: real path of layer
:param model: StageModel
"""
super(SetLayerColor, self).__init__(model)
self.layer_path = layer_path
self.color = color
self.old_color = ''
self.model = model
self.stage = model.stage
@processing
def undo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
layer.color = self.old_color
else:
layer.set_color_over(self.old_color)
self.undo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
@processing
def redo(self):
layer = self.model.lookup_layer(self.layer_path)
if layer is self.model.top_layer:
self.old_color = layer.get_color(local=True)
layer.color = self.color
else:
self.old_color = layer.get_color(fallback_to_local=False)
layer.set_color_over(self.color)
self.redo_effected_layer(self.model.top_layer.real_path)
self.model.layer_color_changed.emit(self.layer_path)
self.setText("Set {} color to {}".format(layer.filepath, self.color))
def _add_node_hierarchy(base_node_path, model, layer):
stage = model.stage
comp_layer = model.comp_layer
new_node_paths = []
new_nodes = []
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
parent=None, layer=layer,
comp_layer=comp_layer)
for nn_p, n in new_node_table:
display_node = comp_layer.lookup(nn_p)
if display_node is not None:
display_child_order = getattr(display_node,
INTERNAL_ATTRS.CHILD_ORDER)
old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
new_child_order = list_merger(display_child_order,
old_child_order)
setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
new_node_paths += [nn_p]
new_nodes += [n]
return new_nodes, new_node_paths, dirty
def undo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")
def redo_debug(cmd, start):
update_time = str(int(round((time.time() - start) * 1000)))
logger.debug(cmd.text() + " | " + update_time + "ms")
| 2.25 | 2 |
mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py | libracore/mietrechtspraxis | 1 | 4537 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from PyPDF2 import PdfFileWriter
from frappe.utils.file_manager import save_file
class ArbitrationAuthority(Document):
pass
def _get_sb(**kwargs):
'''
call on [IP]/api/method/mietrechtspraxis.api.get_sb
    Mandatory Parameters:
        - token
        - plz_city
'''
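    # Illustrative call (host, token and PLZ are placeholder values):
    #   GET [IP]/api/method/mietrechtspraxis.api.get_sb?token=<token>&plz_city=8001
    # On success the endpoint returns ['200 OK', [{'plz': ..., 'ort': ...,
    #   'gemeinde': ..., 'bezirk': ..., 'kanton': ..., 'allgemein': {...},
    #   'schlichtungsbehoerde': [...]}]]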
# check that token is present
try:
token = kwargs['token']
except:
# 400 Bad Request (Missing Token)
return raise_4xx(400, 'Bad Request', 'Token Required')
# check that token is correct
if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):
# 401 Unauthorized (Invalid Token)
return raise_4xx(401, 'Unauthorized', 'Invalid Token')
# check that plz_city is present
try:
plz_city = kwargs['plz_city']
except:
# 400 Bad Request (Missing PLZ/City)
return raise_4xx(400, 'Bad Request', 'PLZ/City Required')
answer = []
    # look up by PLZ
city_results = frappe.db.sql("""
SELECT
                                        `pincode`,
                                        `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `pincode` = '{plz_city}'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) < 1:
        # look up by city name
city_results = frappe.db.sql("""
SELECT
                                            `pincode`,
                                            `city`,
`municipality`,
`district`,
`canton`
FROM `tabPincode`
WHERE `city` LIKE '%{plz_city}%'
ORDER BY `city` ASC
""".format(plz_city=plz_city), as_dict=True)
if len(city_results) > 0:
for city in city_results:
data = {}
            data['plz'] = city.pincode
data['ort'] = city.city
data['gemeinde'] = city.municipality
data['bezirk'] = city.district
data['kanton'] = city.canton
data['allgemein'] = get_informations(city.canton)
data['schlichtungsbehoerde'] = frappe.db.sql("""
SELECT
`schlichtungsbehoerde`.`titel` AS `Titel`,
`schlichtungsbehoerde`.`telefon` AS `Telefon`,
`schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,
`schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,
`schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,
`schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,
`schlichtungsbehoerde`.`homepage` AS `Homepage`
FROM `tabArbitration Authority` AS `schlichtungsbehoerde`
LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`
WHERE `geminendentbl`.`municipality` = '{municipality}'
""".format(municipality=city.municipality), as_dict=True)
answer.append(data)
if len(answer) > 0:
return raise_200(answer)
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
else:
# 404 Not Found
return raise_4xx(404, 'Not Found', 'No results')
def get_informations(kanton):
search = frappe.db.sql("""
SELECT
`informationen`,
`homepage`,
`gesetzessammlung`,
`formulare`
FROM `tabKantonsinformationen`
WHERE `kanton` = '{kanton}'
""".format(kanton=kanton), as_dict=True)
if len(search) > 0:
result = search[0]
else:
result = {}
return result
def raise_4xx(code, title, message):
# 4xx Bad Request / Unauthorized / Not Found
return ['{code} {title}'.format(code=code, title=title), {
"error": {
"code": code,
"message": "{message}".format(message=message)
}
}]
def raise_200(answer):
return ['200 OK', answer]
@frappe.whitelist()
def get_sammel_pdf(no_letterhead=1):
frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead})
return
def _get_sammel_pdf(no_letterhead=1):
output = PdfFileWriter()
schlichtungsbehoerden = frappe.db.sql("""SELECT `name` FROM `tabArbitration Authority`""", as_dict=True)
for schlichtungsbehoerde in schlichtungsbehoerden:
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead = no_letterhead)
output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead = no_letterhead)
pdf = frappe.utils.pdf.get_file_data_from_writer(output)
now = datetime.now()
ts = "{0:04d}-{1:02d}-{2:02d}".format(now.year, now.month, now.day)
file_name = "{0}_{1}.pdf".format('SB_Sammel-PDF', ts)
save_file(file_name, pdf, '', '', is_private=1)
return
| 2.375 | 2 |
easysockets/client_socket.py | Matthias1590/EasySockets | 2 | 4538 | from .connection import Connection
import socket
class ClientSocket:
def __init__(self) -> None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self, host: str, port: int) -> Connection:
self.__socket.connect((host, port))
return Connection(self.__socket)
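# Example usage (illustrative; assumes a server is listening on localhost:8080):
#   client = ClientSocket()
#   connection = client.connect("localhost", 8080)
#   # 'connection' is the Connection instance wrapping the connected socket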
| 3.0625 | 3 |
pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py | yurivict/USD | 1 | 4539 | #!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
# pylint: disable=map-builtin-not-iterating
import sys, unittest
from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf
class TestUsdGeomSchemata(unittest.TestCase):
def test_Basic(self):
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
p = stage.DefinePrim("/Mesh", "Mesh")
self.assertTrue(p)
mesh = UsdGeom.Mesh(p)
self.assertTrue(mesh)
self.assertTrue(mesh.GetPrim())
self.assertTrue(not mesh.GetPointsAttr().Get(1))
self.assertEqual(p.GetTypeName(),
Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType()))
#
# Make sure uniform access behaves as expected.
#
ori = p.GetAttribute("orientation")
# The generic orientation attribute should be automatically defined because
# it is a registered attribute of a well known schema. However, it's not
# yet authored at the current edit target.
self.assertTrue(ori.IsDefined())
self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
# Author a value, and check that it's still defined, and now is in fact
# authored at the current edit target.
ori.Set(UsdGeom.Tokens.leftHanded)
self.assertTrue(ori.IsDefined())
self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))
mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10)
# "leftHanded" should have been authored at Usd.TimeCode.Default, so reading the
# attribute at Default should return lh, not rh.
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded)
# The value "rightHanded" was set at t=10, so reading *any* time should
# return "rightHanded"
self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded)
self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded)
#
# Attribute name sanity check. We expect the names returned by the schema
# to match the names returned via the generic API.
#
self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0)
self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False))
for n in mesh.GetSchemaAttributeNames():
# apiName overrides
if n == "primvars:displayColor":
n = "displayColor"
elif n == "primvars:displayOpacity":
n = "displayOpacity"
name = n[0].upper() + n[1:]
self.assertTrue(("Get" + name + "Attr") in dir(mesh),
("Get" + name + "Attr() not found in: " + str(dir(mesh))))
def test_IsA(self):
# Author Scene and Compose Stage
l = Sdf.Layer.CreateAnonymous()
stage = Usd.Stage.Open(l.identifier)
# For every prim schema type in this module, validate that:
# 1. We can define a prim of its type
# 2. Its type and inheritance matches our expectations
# 3. At least one of its builtin properties is available and defined
# BasisCurves Tests
schema = UsdGeom.BasisCurves.Define(stage, "/BasisCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder
self.assertTrue(schema.GetBasisAttr())
# Camera Tests
schema = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder
self.assertTrue(schema.GetFocalLengthAttr())
# Capsule Tests
schema = UsdGeom.Capsule.Define(stage, "/Capsule")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cone Tests
schema = UsdGeom.Cone.Define(stage, "/Cone")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Cube Tests
schema = UsdGeom.Cube.Define(stage, "/Cube")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder
self.assertTrue(schema.GetSizeAttr())
# Cylinder Tests
schema = UsdGeom.Cylinder.Define(stage, "/Cylinder")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable
self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder
self.assertTrue(schema.GetAxisAttr())
# Mesh Tests
schema = UsdGeom.Mesh.Define(stage, "/Mesh")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder
self.assertTrue(schema.GetFaceVertexCountsAttr())
# NurbsCurves Tests
schema = UsdGeom.NurbsCurves.Define(stage, "/NurbsCurves")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder
self.assertTrue(schema.GetKnotsAttr())
# NurbsPatch Tests
schema = UsdGeom.NurbsPatch.Define(stage, "/NurbsPatch")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder
self.assertTrue(schema.GetUKnotsAttr())
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder
self.assertTrue(schema.GetWidthsAttr())
# Scope Tests
schema = UsdGeom.Scope.Define(stage, "/Scope")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh
self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder
# Scope has no builtins!
# Sphere Tests
schema = UsdGeom.Sphere.Define(stage, "/Sphere")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder
self.assertTrue(schema.GetRadiusAttr())
# Xform Tests
schema = UsdGeom.Xform.Define(stage, "/Xform")
self.assertTrue(schema)
prim = schema.GetPrim()
self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable
self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder
self.assertTrue(schema.GetXformOpOrderAttr())
def test_Fallbacks(self):
# Author Scene and Compose Stage
stage = Usd.Stage.CreateInMemory()
# Xformable Tests
identity = Gf.Matrix4d(1)
origin = Gf.Vec3f(0, 0, 0)
xform = UsdGeom.Xform.Define(stage, "/Xform") # direct subclass
xformOpOrder = xform.GetXformOpOrderAttr()
self.assertFalse(xformOpOrder.HasAuthoredValue())
# xformOpOrder has no fallback value
self.assertEqual(xformOpOrder.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
# Try authoring and reverting...
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder)
self.assertTrue(xformOpOrderAttr)
self.assertEqual(xformOpOrderAttr.Get(), None)
opOrderVal = ["xformOp:transform"]
self.assertTrue(xformOpOrderAttr.Set(opOrderVal))
self.assertTrue(xformOpOrderAttr.HasAuthoredValue())
self.assertNotEqual(xformOpOrderAttr.Get(), None)
self.assertTrue(xformOpOrderAttr.Clear())
self.assertFalse(xformOpOrderAttr.HasAuthoredValue())
self.assertEqual(xformOpOrderAttr.Get(), None)
self.assertFalse(xformOpOrder.HasFallbackValue())
mesh = UsdGeom.Mesh.Define(stage, "/Mesh") # multiple ancestor hops
# PointBased and Curves
curves = UsdGeom.BasisCurves.Define(stage, "/Curves")
self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex)
self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex)
# Before we go, test that CreateXXXAttr performs as we expect in various
# scenarios
# Number 1: Sparse and non-sparse authoring on def'd prim
mesh.CreateDoubleSidedAttr(False, True)
self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue())
mesh.CreateDoubleSidedAttr(False, False)
self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue())
# Number 2: Sparse authoring demotes to dense for non-defed prim
overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh'))
overMesh.CreateDoubleSidedAttr(False, True)
self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue())
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False)
overMesh.CreateDoubleSidedAttr(True, True)
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# make it a defined mesh, and sanity check it still evals the same
mesh2 = UsdGeom.Mesh.Define(stage, "/overMesh")
self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)
# Check querying of fallback values.
sphere = UsdGeom.Sphere.Define(stage, "/Sphere")
radius = sphere.GetRadiusAttr()
self.assertTrue(radius.HasFallbackValue())
radiusQuery = Usd.AttributeQuery(radius)
self.assertTrue(radiusQuery.HasFallbackValue())
def test_DefineSchema(self):
s = Usd.Stage.CreateInMemory()
parent = s.OverridePrim('/parent')
self.assertTrue(parent)
# Make a subscope.
scope = UsdGeom.Scope.Define(s, '/parent/subscope')
self.assertTrue(scope)
# Assert that a simple find or create gives us the scope back.
self.assertTrue(s.OverridePrim('/parent/subscope'))
self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim())
# Try to make a mesh at subscope's path. This transforms the scope into a
# mesh, since Define() always authors typeName.
mesh = UsdGeom.Mesh.Define(s, '/parent/subscope')
self.assertTrue(mesh)
self.assertTrue(not scope)
# Make a mesh at a different path, should work.
mesh = UsdGeom.Mesh.Define(s, '/parent/mesh')
self.assertTrue(mesh)
def test_BasicMetadataCases(self):
s = Usd.Stage.CreateInMemory()
spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim()
radius = spherePrim.GetAttribute('radius')
self.assertTrue(radius.HasMetadata('custom'))
self.assertTrue(radius.HasMetadata('typeName'))
self.assertTrue(radius.HasMetadata('variability'))
self.assertTrue(radius.IsDefined())
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetTypeName(), 'double')
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# Author a custom property spec.
layer = s.GetRootLayer()
sphereSpec = layer.GetPrimAtPath('/sphere')
radiusSpec = Sdf.AttributeSpec(
sphereSpec, 'radius', Sdf.ValueTypeNames.Double,
variability=Sdf.VariabilityUniform, declaresCustom=True)
self.assertTrue(radiusSpec.custom)
self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform)
# Definition should win.
self.assertTrue(not radius.IsCustom())
self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying)
allMetadata = radius.GetAllMetadata()
self.assertEqual(allMetadata['typeName'], 'double')
self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
self.assertEqual(allMetadata['custom'], False)
# List fields on 'visibility' attribute -- should include 'allowedTokens',
# provided by the property definition.
visibility = spherePrim.GetAttribute('visibility')
self.assertTrue(visibility.IsDefined())
self.assertTrue('allowedTokens' in visibility.GetAllMetadata())
# Assert that attribute fallback values are returned for builtin attributes.
do = spherePrim.GetAttribute('primvars:displayOpacity')
self.assertTrue(do.IsDefined())
self.assertTrue(do.Get() is None)
def test_Camera(self):
from pxr import Gf
stage = Usd.Stage.CreateInMemory()
camera = UsdGeom.Camera.Define(stage, "/Camera")
self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable
self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective')
camera.GetProjectionAttr().Set('orthographic')
self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic')
self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(),
0.825 * 25.4, 1e-5))
camera.GetHorizontalApertureAttr().Set(3.0)
self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0)
self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(),
0.602 * 25.4, 1e-5))
camera.GetVerticalApertureAttr().Set(2.0)
self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0)
self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0)
camera.GetFocalLengthAttr().Set(35.0)
self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5))
self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000))
camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10))
self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(),
Gf.Vec2f(5, 10), 1e-5))
self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray())
cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)])
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
cp = Vt.Vec4fArray()
camera.GetClippingPlanesAttr().Set(cp)
self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)
self.assertEqual(camera.GetFStopAttr().Get(), 0.0)
camera.GetFStopAttr().Set(2.8)
self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5))
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0)
camera.GetFocusDistanceAttr().Set(10.0)
self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0)
def test_Points(self):
stage = Usd.Stage.CreateInMemory()
# Points Tests
schema = UsdGeom.Points.Define(stage, "/Points")
self.assertTrue(schema)
# Test that id's roundtrip properly, for big numbers, and negative numbers
ids = [8589934592, 1099511627776, 0, -42]
schema.CreateIdsAttr(ids)
resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list
self.assertEqual(ids, resolvedIds)
def test_Revert_Bug111239(self):
        # This used to test a change for Bug111239, but now tests that this
        # fix has been reverted. We no longer allow the C++ typename to be
        # used as a prim's typename.
s = Usd.Stage.CreateInMemory()
sphere = s.DefinePrim('/sphere', typeName='Sphere')
tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName
self.assertEqual(tfTypeName, 'UsdGeomSphere')
        usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName=tfTypeName)
self.assertTrue(UsdGeom.Sphere(sphere))
self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()])
self.assertFalse(UsdGeom.Sphere(usdGeomSphere))
self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()])
def test_ComputeExtent(self):
from pxr import Gf
# Create some simple test cases
allPoints = [
[(1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
[(-1, -1, -1), (1, 1, 1)], # Erroneous Widths/Points Test
# Complex Test, Many Points/Widths
[(3, -1, 5), (-1.5, 0, 3), (1, 3, -2), (2, 2, -4)],
]
allWidths = [
[0], # Zero-Volume Extent Test
[2], # Simple Width Test
[2, 4], # Multiple Width Test
[2, 4, 5], # Erroneous Widths/Points Test
[1, 2, 2, 1] # Complex Test, Many Points/Widths
]
pointBasedSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(0, 0, 0), (0, 0, 0)], # Simple Width Test
[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test
# Erroneous Widths/Points Test -> Ok For Point-Based
[(-1, -1, -1), (1, 1, 1)],
[(-1.5, -1, -4), (3, 3, 5)] # Complex Test, Many Points/Widths
]
pointsSolutions = [
[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test
[(-1, -1, -1), (1, 1, 1)], # Simple Width Test
[(-2, -2, -2), (3, 3, 3)], # Multiple Width Test
# Erroneous Widths/Points Test -> Returns None
None,
[(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex Test, Many Points/Widths
]
# Perform the correctness tests for PointBased and Points
# Test for empty points prims
emptyPoints = []
extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints)
# We need to map the contents of extremeExtentArr to floats from
        # numpy.float32 values due to the way Gf.Vec3f is wrapped out
# XXX: This is awful, it'd be nice to not do it
extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])),
Gf.Vec3f(*map(float, extremeExtentArr[1])))
self.assertTrue(extremeExtentRange.IsEmpty())
# PointBased Test
numDataSets = len(allPoints)
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Points Test
for i in range(numDataSets):
pointsData = allPoints[i]
widthsData = allWidths[i]
expectedExtent = pointsSolutions[i]
actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData)
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
pointsPrim = UsdGeom.Points.Define(s, "/Points")
pointsPrim.CreatePointsAttr(pointsData)
pointsPrim.CreateWidthsAttr(widthsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
pointsPrim, Usd.TimeCode.Default())
if actualExtent is not None and expectedExtent is not None:
for a, b in zip(expectedExtent, list(actualExtent)):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Mesh Test
for i in range(numDataSets):
pointsData = allPoints[i]
expectedExtent = pointBasedSolutions[i]
# Compute extent via generic UsdGeom.Boundable API.
# UsdGeom.Mesh does not have its own compute extent function, so
# it should fall back to the extent for PointBased prims.
s = Usd.Stage.CreateInMemory()
meshPrim = UsdGeom.Mesh.Define(s, "/Mesh")
meshPrim.CreatePointsAttr(pointsData)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
meshPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Test UsdGeomCurves
curvesPoints = [
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1 width
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2 widths
[(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with no width
]
curvesWidths = [
[1], # Test Curve with 1 width
[.5, .1], # Test Curve with 2 widths
[] # Test Curve with no width
]
curvesSolutions = [
[(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width
[(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths (MAX)
[(0,0,0), (3,1,1)], # Test Curve with no width
]
# Perform the actual v. expected comparison
numDataSets = len(curvesPoints)
for i in range(numDataSets):
pointsData = curvesPoints[i]
widths = curvesWidths[i]
expectedExtent = curvesSolutions[i]
actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
# Compute extent via generic UsdGeom.Boundable API
s = Usd.Stage.CreateInMemory()
nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, "/NurbsCurves")
nurbsCurvesPrim.CreatePointsAttr(pointsData)
nurbsCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
nurbsCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
basisCurvesPrim = UsdGeom.BasisCurves.Define(s, "/BasisCurves")
basisCurvesPrim.CreatePointsAttr(pointsData)
basisCurvesPrim.CreateWidthsAttr(widths)
actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
basisCurvesPrim, Usd.TimeCode.Default())
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
    def test_TypeUsage(self):
        from pxr import Gf
        # Perform Type-Ness Checking for ComputeExtent
pointsAsList = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]
pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList)
comp = UsdGeom.PointBased.ComputeExtent
expectedExtent = comp(pointsAsVec3fArr)
actualExtent = comp(pointsAsList)
for a, b in zip(expectedExtent, actualExtent):
self.assertTrue(Gf.IsClose(a, b, 1e-5))
def test_Bug116593(self):
from pxr import Gf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/sphere', typeName='Sphere')
# set with list of tuples
vec = [(1,2,2),(12,3,3)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3))
# set with Gf vecs
vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)]
self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1))
def test_Typed(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry.IsTyped(xform))
self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable))
self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI))
def test_Concrete(self):
from pxr import Tf
xform = Tf.Type.FindByName("UsdGeomXform")
imageable = Tf.Type.FindByName("UsdGeomImageable")
geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")
self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable))
self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI))
def test_Apply(self):
s = Usd.Stage.CreateInMemory('AppliedSchemas.usd')
root = s.DefinePrim('/hello')
self.assertEqual([], root.GetAppliedSchemas())
# Check duplicates
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
UsdGeom.MotionAPI.Apply(root)
self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
# Ensure duplicates aren't picked up
UsdGeom.ModelAPI.Apply(root)
self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas())
# Verify that we get exceptions but don't crash when applying to the
# null prim.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim()))
with self.assertRaises(Tf.ErrorException):
self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim()))
def test_IsATypeless(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
spherePrim = s.DefinePrim('/sphere', typeName='Sphere')
typelessPrim = s.DefinePrim('/regular')
types = [Tf.Type.FindByName('UsdGeomSphere'),
Tf.Type.FindByName('UsdGeomGprim'),
Tf.Type.FindByName('UsdGeomBoundable'),
Tf.Type.FindByName('UsdGeomXformable'),
Tf.Type.FindByName('UsdGeomImageable'),
Tf.Type.FindByName('UsdTyped')]
# Our sphere prim should return true on IsA queries for Sphere
# and everything it inherits from. Our plain prim should return false
# for all of them.
for t in types:
self.assertTrue(spherePrim.IsA(t))
self.assertFalse(typelessPrim.IsA(t))
def test_HasAPI(self):
from pxr import Usd, Tf
s = Usd.Stage.CreateInMemory()
prim = s.DefinePrim('/prim')
types = [Tf.Type.FindByName('UsdGeomMotionAPI'),
Tf.Type.FindByName('UsdGeomModelAPI')]
# Check that no APIs have yet been applied
for t in types:
self.assertFalse(prim.HasAPI(t))
# Apply our schemas to this prim
UsdGeom.ModelAPI.Apply(prim)
UsdGeom.MotionAPI.Apply(prim)
# Check that all our applied schemas show up
for t in types:
self.assertTrue(prim.HasAPI(t))
# Check that we get an exception for unknown and non-API types
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.Unknown)
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomXform'))
with self.assertRaises(Tf.ErrorException):
prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable'))
with self.assertRaises(Tf.ErrorException):
# Test with a non-applied API schema.
prim.HasAPI(Tf.Type.FindByName('UsdModelAPI'))
if __name__ == "__main__":
unittest.main()
| 1.960938 | 2 |
round_robin_generator/matchup_times.py | avadavat/round_robin_generator | 0 | 4540 | import pandas as pd
from datetime import timedelta
def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger):
time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns)
if game_stagger == 0:
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num))
time_df.loc[round_key, :] = match_time.strftime('%I:%M%p')
return time_df
else:
"""
# Given the algorithm, at worst every player can play every (game duration + stagger time)
# This is b/c your opponent begins play one stagger count after you at the latest.
"""
for round_num in range(time_df.shape[0]):
round_key = 'Round ' + str(round_num + 1)
default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num in
range(time_df.shape[1])]
match_times = [
(def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for
def_time in default_spread]
time_df.loc[round_key, :] = match_times
return time_df
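
# A minimal usage sketch (not part of the original module): the matchup table
# below is a hypothetical 2-round, 2-court schedule; generate_times only cares
# about its index labels ('Round 1', 'Round 2', ...) and its shape.
if __name__ == "__main__":
    from datetime import datetime

    example_matchups = pd.DataFrame(
        [["A-B", "C-D"], ["A-C", "B-D"]],
        index=["Round 1", "Round 2"],
        columns=["Court 1", "Court 2"],
    )
    print(generate_times(example_matchups,
                         tournament_start_time=datetime(2021, 1, 1, 9, 0),
                         game_duration=30,
                         game_stagger=10))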
| 2.984375 | 3 |
src/commands/locate_item.py | seisatsu/DennisMUD-ESP32 | 19 | 4541 | <reponame>seisatsu/DennisMUD-ESP32<filename>src/commands/locate_item.py
#######################
# <NAME> #
# locate_item.py #
# Copyright 2018-2020 #
# <NAME> #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "locate item"
CATEGORIES = ["items"]
ALIASES = ["find item"]
USAGE = "locate item <item_id>"
DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it.
You can only locate an item that you own.
Wizards can locate any item.
Ex. `locate item 4`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argc=1):
return False
# Perform argument type checks and casts.
itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
if itemid is None:
return False
# Check if the item exists.
thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False)
if not thisitem:
return False
# Keep track of whether we found anything in case the item is duplified and we can't return right away.
found_something = False
# Check if we are holding the item.
if itemid in console.user["inventory"]:
console.msg("{0}: {1} ({2}) is in your inventory.".format(NAME, thisitem["name"], thisitem["id"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Check if someone else is holding the item.
for targetuser in console.database.users.all():
if targetuser["name"] == console.user["name"]:
continue
if itemid in targetuser["inventory"]:
console.msg("{0}: {1} ({2}) is in the inventory of: {3}.".format(NAME, thisitem["name"], thisitem["id"],
targetuser["name"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Check if the item is in a room.
for targetroom in console.database.rooms.all():
if itemid in targetroom["items"]:
console.msg("{0}: {1} ({2}) is in room: {3} ({4})".format(NAME, thisitem["name"], thisitem["id"],
targetroom["name"], targetroom["id"]))
# If the item is duplified we need to keep looking for other copies.
if not thisitem["duplified"]:
return True
found_something = True
# Couldn't find the item.
if not found_something:
console.log.error("Item exists but has no location: {item}", item=itemid)
console.msg("{0}: ERROR: Item exists but has no location. Use `requisition` to fix this.".format(NAME))
return False
# Finished.
return True
| 2.296875 | 2 |
modelling/scsb/models/monthly-comparisons.py | bcgov-c/wally | 0 | 4542 | <filename>modelling/scsb/models/monthly-comparisons.py
import json
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE, r2_score
import math
# with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f:
# data = json.load(f)
all_zones_df = pd.read_csv("../data/scsb_all_zones.csv")
zone_25_df = pd.read_csv("../data/scsb_zone_25.csv")
zone_26_df = pd.read_csv("../data/scsb_zone_26.csv")
zone_27_df = pd.read_csv("../data/scsb_zone_27.csv")
month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist']
month_labels = [x[0:3] for x in month_dependant_variables]
data = zone_26_df
xgb_results = []
rfr_results = []
dtr_results = []
# calculate monthly estimations for 3 models
for dependant_month in month_dependant_variables:
features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]]
X = features_df.drop([dependant_month], axis=1)
y = features_df.get(dependant_month)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
xgb = XGBRegressor(random_state=42)
xgb.fit(X_train, y_train)
xgb_results.append(xgb.predict(X))
rfr = RandomForestRegressor(random_state=42)
rfr.fit(X_train, y_train)
rfr_results.append(rfr.predict(X))
dtr = DecisionTreeRegressor(random_state=42)
dtr.fit(X_train, y_train)
dtr_results.append(dtr.predict(X))
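
# Each of xgb_results / rfr_results / dtr_results now holds 12 arrays (one per
# month), each covering every row of `data`, so index i below collects the
# twelve monthly predictions for the i-th row of the zone table.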
# compare the outputs of scsb against the 3 models
for row_target_index in range(20):
xgb_row = []
rfr_row = []
dtr_row = []
for month in range(12):
xgb_row.append(xgb_results[month][row_target_index])
rfr_row.append(rfr_results[month][row_target_index])
dtr_row.append(dtr_results[month][row_target_index])
plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5)
plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5)
plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5)
plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5)
plt.legend(loc='best')
plt.xticks(month_dependant_variables, month_labels)
plt.xlabel('Month')
plt.ylabel('Monthly Distribution')
name = data['name'].iloc[row_target_index]
plt.title(name)
plt.savefig('../plots/{}.png'.format(name))
plt.show()
| 2.53125 | 3 |
src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py | xzhnshng/databricks-zero-to-mlops | 0 | 4543 | # Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_
# COMMAND ----------
import mlflow
import databricks.automl_runtime
# Use MLflow to track experiments
mlflow.set_experiment("/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38")
target_col = "label"
# COMMAND ----------
# MAGIC %md
# MAGIC ## Load Data
# COMMAND ----------
from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import pandas as pd
# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)
# Download the artifact and read it into a pandas DataFrame
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)
df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data"))
# Delete the temp data
shutil.rmtree(input_temp_dir)
# Preview data
df_loaded.head(5)
# COMMAND ----------
df_loaded.head(1).to_dict()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped.
# COMMAND ----------
from databricks.automl_runtime.sklearn.column_selector import ColumnSelector
supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Preprocessors
# COMMAND ----------
transformers = []
# COMMAND ----------
# MAGIC %md
# MAGIC ### Categorical columns
# COMMAND ----------
# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column.
# COMMAND ----------
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
one_hot_encoder = OneHotEncoder(handle_unknown="ignore")
transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.
# COMMAND ----------
from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
for feature in ["text", "main_img_url"]:
hash_transformer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
(f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
transformers.append((f"{feature}_hasher", hash_transformer, [feature]))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].
# COMMAND ----------
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer
for col in {'type', 'author'}:
vectorizer = Pipeline(steps=[
("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
# Reshape to 1D since SimpleImputer changes the shape of the input to 2D
("reshape", FunctionTransformer(np.reshape, kw_args={"newshape":-1})),
("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range = (1, 2), max_features=1024))])
transformers.append((f"text_{col}", vectorizer, [col]))
# COMMAND ----------
from sklearn.compose import ColumnTransformer
preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale all feature columns to be centered around zero with unit variance.
# COMMAND ----------
from sklearn.preprocessing import StandardScaler
standardizer = StandardScaler()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)
# COMMAND ----------
df_loaded.columns
# COMMAND ----------
from sklearn.model_selection import train_test_split
split_X = df_loaded.drop([target_col], axis=1)
split_y = df_loaded[target_col]
# Split out train data
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)
# Split remaining data equally for validation and test
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below
# COMMAND ----------
from xgboost import XGBClassifier
help(XGBClassifier)
# COMMAND ----------
import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline
set_config(display="diagram")
xgbc_classifier = XGBClassifier(
colsample_bytree=0.7324555878929649,
learning_rate=0.007636627530856404,
max_depth=7,
min_child_weight=6,
n_estimators=106,
n_jobs=100,
subsample=0.6972187716458148,
verbosity=0,
random_state=799811440,
)
model = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
("classifier", xgbc_classifier),
])
# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
("column_selector", col_selector),
("preprocessor", preprocessor),
("standardizer", standardizer),
])
mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)
model
# COMMAND ----------
# Enable automatic logging of input samples, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silent=True)
with mlflow.start_run(run_name="xgboost") as mlflow_run:
model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)
# Training metrics are logged by MLflow autologging
# Log metrics for the validation set
xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")
# Log metrics for the test set
xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")
# Display the logged metrics
xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()}
xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()}
display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))
# COMMAND ----------
# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml
None
import xgboost
from mlflow.tracking import MlflowClient
xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
xgbc_model_env_str = open(xgbc_model_env_path)
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)
xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}")
with open(xgbc_model_env_path, "w") as f:
f.write(yaml.dump(xgbc_parsed_model_env_str))
xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the target variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed samples may not match the actual data distribution.
# MAGIC
# MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).
# COMMAND ----------
# Set this flag to True and re-run the notebook to see the SHAP plots
shap_enabled = True
# COMMAND ----------
if shap_enabled:
from shap import KernelExplainer, summary_plot
# SHAP cannot explain models using data with nulls.
# To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values).
mode = X_train.mode().iloc[0]
# Sample background data for SHAP Explainer. Increase the sample size to reduce variance.
train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)
# Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results.
example = X_val.sample(n=1).fillna(mode)
# Use Kernel SHAP to explain feature importance on the example from the validation set.
predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))
explainer = KernelExplainer(predict, train_sample, link="logit")
shap_values = explainer.shap_values(example, l1_reg=False)
summary_plot(shap_values, example, class_names=model.classes_)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```
# COMMAND ----------
# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")
# COMMAND ----------
# MAGIC %md
# MAGIC ### Loading model to make prediction
# COMMAND ----------
model_uri = f"runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
#model.predict(input_X)
# COMMAND ----------
import pandas as pd
data = {'author': {0: '<EMAIL>jim.<EMAIL>'},
'published': {0: '2016-10-27T18:05:26.351+03:00'},
'title': {0: 'aliens are coming to invade earth'},
'text': {0: 'aliens are coming to invade earth'},
'language': {0: 'english'},
'site_url': {0: 'cnn.com'},
'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
'type': {0: 'bs'},
'title_without_stopwords': {0: 'aliens are coming to invade earth'},
'text_without_stopwords': {0: 'aliens are coming to invade earth'},
'hasImage': {0: 1.0}}
df = pd.DataFrame(data=data)
df.head()
# COMMAND ----------
model.predict(df)
# COMMAND ----------
| 2.25 | 2 |
lucky_guess/__init__.py | mfinzi/lucky-guess-chemist | 0 | 4544 |
import importlib
import pkgutil
__all__ = []
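
# Auto-import every submodule in this package and re-export the names each one
# lists in __all__, so `from lucky_guess import *` exposes the whole toolkit.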
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
module = importlib.import_module('.'+module_name,package=__name__)
try:
globals().update({k: getattr(module, k) for k in module.__all__})
__all__ += module.__all__
except AttributeError: continue | 2.09375 | 2 |
shuffling_algorithm.py | BaptisteLafoux/aztec_tiling | 0 | 4545 | <filename>shuffling_algorithm.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:04:48 2020
@author: baptistelafoux
"""
import domino
import numpy as np
import numpy.lib.arraysetops as aso
def spawn_block(x, y):
if np.random.rand() > 0.5:
d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1]))
d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1, y + 1]), np.array([0, 1]))
else:
d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0]))
d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1, y + 1]), np.array([ 1,0]))
return [d1, d2]
def aztec_grid(order, only_new_blocks = True):
grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order - 1)/2 , np.arange(2 * order) - (2 * order - 1)/2)
center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T
center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))]
X = center_pts[:,0]
Y = center_pts[:,1]
if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order - 1)
else: idx = np.abs(X) + np.abs(Y) <= order
return X[idx], Y[idx]
def add_to_grid(tiles, grid):
for tile in tiles:
grid[tile.pt1[0], tile.pt1[1]] = tile
grid[tile.pt2[0], tile.pt2[1]] = tile
return grid
def generate_good_block(grid):
center_pts = np.array([*grid])
center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))]
X = center_pts[:, 0]
Y = center_pts[:, 1]
for (x,y) in zip(X,Y):
try:
if ~grid[x, y]:
idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)]
try:
should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool)
if should_create_a_block:
grid = add_to_grid(spawn_block(x, y), grid)
except: pass
except: pass
return grid
def enlarge_grid_deprec(grid, order):
center_pts = [*grid]
X_aztec, Y_aztec = aztec_grid(order)
center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)]
diff_array = set(center_pts_aztec) - set(center_pts)
if order > 1:
for x, y in list(diff_array):
grid[x, y] = False
else:
for (x,y) in zip(X_aztec, Y_aztec):
grid[x, y] = False
return grid
def enlarge_grid(grid, order):
X_aztec, Y_aztec = aztec_grid(order, True)
for (x,y) in zip(X_aztec, Y_aztec):
grid[x, y] = False
return grid
def move_tiles(grid, curr_order):
temp_grid = {}
for coord in grid:
if grid[coord] != False:
x1, y1 = grid[coord].pt1
x2, y2 = grid[coord].pt2
grid[coord].move()
temp_grid = add_to_grid([grid[coord]], temp_grid)
grid[x1, y1] = False
grid[x2, y2] = False
for coord in temp_grid:
grid[coord] = temp_grid[coord]
return grid
def destroy_bad_blocks(grid):
center_pts = np.array([*grid])
X = center_pts[:, 0]
Y = center_pts[:, 1]
for (x,y) in zip(X,Y):
try:
next_x, next_y = np.array([x, y]) + grid[x, y].v
if (grid[next_x, next_y] != False):
if all(grid[next_x, next_y].v == - grid[x, y].v):
grid[x, y ] = False
grid[next_x, next_y] = False
except: pass
return grid
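
# Rough usage sketch (not from the original repo; it relies on the external
# `domino` module's interface exactly as used above): build an order-1 Aztec
# diamond and fill it with an initial random block. A full shuffling step would
# then call destroy_bad_blocks, enlarge_grid, move_tiles and generate_good_block
# again; the exact ordering lives in the repo's driver script.
if __name__ == "__main__":
    grid = {}
    grid = enlarge_grid(grid, 1)      # register the 4 cells of the order-1 diamond as empty
    grid = generate_good_block(grid)  # spawn a random horizontal or vertical pair of dominoes
    tiles = {id(t): t for t in grid.values() if t is not False}
    print("order-1 diamond tiled with", len(tiles), "dominoes")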
| 3.078125 | 3 |
scripts/matrix_operations.py | h3ct0r/gas_mapping_example | 1 | 4546 | import numpy as np
def get_position_of_minimum(matrix):
return np.unravel_index(np.nanargmin(matrix), matrix.shape)
def get_position_of_maximum(matrix):
return np.unravel_index(np.nanargmax(matrix), matrix.shape)
def get_distance_matrix(cell_grid_x, cell_grid_y, x, y):
return np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)
def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y):
return (x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2
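
# Small usage sketch (not part of the original module): find the grid cell
# closest to a given (x, y) position on a 5x5 cell grid.
if __name__ == "__main__":
    xs, ys = np.meshgrid(np.arange(5), np.arange(5))
    distances = get_distance_matrix(xs, ys, x=2.2, y=3.7)
    print("closest cell (row, col):", get_position_of_minimum(distances))  # -> (4, 2)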
| 2.984375 | 3 |
ShanghaiPower/build_up.py | biljiang/pyprojects | 0 | 4547 | <gh_stars>0
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize(["license_chk.py"]))
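
# Typical invocation for a Cython setup script like this one (not part of the
# original file): `python build_up.py build_ext --inplace`, which compiles
# license_chk.py into a C extension module alongside the source.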
| 1.117188 | 1 |
quantum/plugins/nicira/extensions/nvp_qos.py | yamt/neutron | 0 | 4548 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: <NAME>, Nicira Networks, Inc.
from abc import abstractmethod
from quantum.api import extensions
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum import manager
# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"
class DefaultQueueCreateNotAdmin(qexception.InUse):
message = _("Need to be admin in order to create queue called default")
class DefaultQueueAlreadyExists(qexception.InUse):
message = _("Default queue already exists.")
class QueueInvalidDscp(qexception.InvalidInput):
message = _("Invalid value for dscp %(data)s must be integer.")
class QueueMinGreaterMax(qexception.InvalidInput):
message = _("Invalid bandwidth rate, min greater than max.")
class QueueInvalidBandwidth(qexception.InvalidInput):
message = _("Invalid bandwidth rate, %(data)s must be a non negative"
" integer.")
class MissingDSCPForTrusted(qexception.InvalidInput):
message = _("No DSCP field needed when QoS workload marked trusted")
class QueueNotFound(qexception.NotFound):
message = _("Queue %(id)s does not exist")
class QueueInUseByPort(qexception.InUse):
message = _("Unable to delete queue attached to port.")
class QueuePortBindingNotFound(qexception.NotFound):
message = _("Port is not associated with lqueue")
def convert_to_unsigned_int_or_none(val):
if val is None:
return
try:
val = int(val)
if val < 0:
raise ValueError
except (ValueError, TypeError):
msg = _("'%s' must be a non negative integer.") % val
raise qexception.InvalidInput(error_message=msg)
return val
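
# Behavior sketch for the converter above: None passes through unchanged,
# convert_to_unsigned_int_or_none('5') returns 5, and a negative or non-numeric
# value raises InvalidInput ("must be a non negative integer").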
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'qos_queues': {
'id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'default': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_boolean,
'is_visible': True, 'default': False},
'name': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'min': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'max': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': None,
'convert_to': convert_to_unsigned_int_or_none},
'qos_marking': {'allow_post': True, 'allow_put': False,
'validate': {'type:values': ['untrusted', 'trusted']},
'default': 'untrusted', 'is_visible': True},
'dscp': {'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': '0',
'convert_to': convert_to_unsigned_int_or_none},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
},
}
QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
RXTX_FACTOR: {'allow_post': True,
'allow_put': False,
'is_visible': False,
'default': 1,
'convert_to': convert_to_unsigned_int_or_none},
QUEUE: {'allow_post': False,
'allow_put': False,
'is_visible': True,
'default': False}},
'networks': {QUEUE: {'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': False}}
}
class Nvp_qos(object):
"""Port Queue extension."""
@classmethod
def get_name(cls):
return "nvp-qos"
@classmethod
def get_alias(cls):
return "nvp-qos"
@classmethod
def get_description(cls):
return "NVP QoS extension."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/nvp-qos/api/v2.0"
@classmethod
def get_updated(cls):
return "2012-10-05T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.QuantumManager.get_plugin()
resource_name = 'qos_queue'
collection_name = resource_name.replace('_', '-') + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
controller = base.create_resource(collection_name,
resource_name,
plugin, params, allow_bulk=False)
ex = extensions.ResourceExtension(collection_name,
controller)
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return dict(EXTENDED_ATTRIBUTES_2_0.items() +
RESOURCE_ATTRIBUTE_MAP.items())
else:
return {}
class QueuePluginBase(object):
@abstractmethod
def create_qos_queue(self, context, queue):
pass
@abstractmethod
def delete_qos_queue(self, context, id):
pass
@abstractmethod
def get_qos_queue(self, context, id, fields=None):
pass
@abstractmethod
def get_qos_queues(self, context, filters=None, fields=None):
pass
| 2.015625 | 2 |
easyneuron/math/__init__.py | TrendingTechnology/easyneuron | 1 | 4549 | <reponame>TrendingTechnology/easyneuron
"""easyneuron.math contains all of the maths tools that you'd ever need for your AI projects, when used alongside Numpy.
To suggest more to be added, please add an issue on the GitHub repo.
"""
from easyneuron.math.distance import euclidean_distance | 1.796875 | 2 |
tests/unit/concurrently/test_TaskPackageDropbox_put.py | shane-breeze/AlphaTwirl | 0 | 4550 | <reponame>shane-breeze/AlphaTwirl
# <NAME> <<EMAIL>>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import TaskPackageDropbox
##__________________________________________________________________||
@pytest.fixture()
def workingarea():
return mock.MagicMock()
@pytest.fixture()
def dispatcher():
return mock.MagicMock()
@pytest.fixture()
def obj(workingarea, dispatcher):
ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
ret.open()
yield ret
ret.close()
##__________________________________________________________________||
def test_repr(obj):
repr(obj)
def test_open_terminate_close(workingarea, dispatcher):
obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)
assert 0 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.open()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 0 == dispatcher.terminate.call_count
obj.terminate()
assert 1 == workingarea.open.call_count
assert 0 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
obj.close()
assert 1 == workingarea.open.call_count
assert 1 == workingarea.close.call_count
assert 1 == dispatcher.terminate.call_count
def test_put(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run.side_effect = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert 0 == obj.put(package0)
assert 1 == obj.put(package1)
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list
def test_put_multiple(obj, workingarea, dispatcher):
workingarea.put_package.side_effect = [0, 1] # pkgidx
dispatcher.run_multiple.return_value = [1001, 1002] # runid
package0 = mock.MagicMock(name='package0')
package1 = mock.MagicMock(name='package1')
assert [0, 1] == obj.put_multiple([package0, package1])
assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list
assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list
##__________________________________________________________________||
| 2.28125 | 2 |
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py | gokarslan/networking-odl2 | 0 | 4551 | # Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base
from oslo_config import cfg
load_tests = testscenarios.load_tests_apply_scenarios
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):
def setUp(self):
super(OdlDhcpDriverTestCase, self).setUp()
cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')
self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
self.mech.initialize()
def test_dhcp_flag_test(self):
self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)
def test_dhcp_driver_load(self):
self.assertTrue(isinstance(self.mech.dhcp_driver,
odl_dhcp_driver.OdlDhcpDriver))
def test_dhcp_port_create_on_subnet_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
self.mech.journal.sync_pending_entries()
port = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port)
def test_dhcp_delete_on_port_update_event(self):
data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
True)
subnet_context = data['subnet_context']
plugin = data['plugin']
self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNotNone(port_id)
port = plugin.get_port(data['context'], port_id)
port['fixed_ips'] = []
ports = {'port': port}
plugin.update_port(data['context'], port_id, ports)
mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
self.mech.journal.sync_pending_entries()
port_id = self.get_port_id(data['plugin'],
data['context'],
data['network_id'],
data['subnet_id'])
self.assertIsNone(port_id)
| 1.953125 | 2 |
users/migrations/0002_auto_20191113_1352.py | Dragonite/djangohat | 2 | 4552 | # Generated by Django 2.2.2 on 2019-11-13 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='users',
name='site_key',
field=models.CharField(blank=True, default='<KEY>', max_length=32, unique=True),
),
]
| 1.617188 | 2 |
premium/backend/src/baserow_premium/api/admin/dashboard/views.py | cjh0613/baserow | 839 | 4553 | <reponame>cjh0613/baserow
from datetime import timedelta
from django.contrib.auth import get_user_model
from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework.views import APIView
from baserow.api.decorators import accept_timezone
from baserow.core.models import Group, Application
from baserow_premium.admin.dashboard.handler import AdminDashboardHandler
from .serializers import AdminDashboardSerializer
User = get_user_model()
class AdminDashboardView(APIView):
permission_classes = (IsAdminUser,)
@extend_schema(
tags=["Admin"],
operation_id="admin_dashboard",
description="Returns the new and active users for the last 24 hours, 7 days and"
" 30 days. The `previous_` values are the values of the period before, so for "
"example `previous_new_users_last_24_hours` are the new users that signed up "
"from 48 to 24 hours ago. It can be used to calculate an increase or decrease "
"in the amount of signups. A list of the new and active users for every day "
"for the last 30 days is also included.\n\nThis is a **premium** feature.",
responses={
200: AdminDashboardSerializer,
401: None,
},
)
@accept_timezone()
def get(self, request, now):
"""
Returns the new and active users for the last 24 hours, 7 days and 30 days.
The `previous_` values are the values of the period before, so for example
`previous_new_users_last_24_hours` are the new users that signed up from 48
to 24 hours ago. It can be used to calculate an increase or decrease in the
amount of signups. A list of the new and active users for every day for the
last 30 days is also included.
"""
handler = AdminDashboardHandler()
total_users = User.objects.filter(is_active=True).count()
total_groups = Group.objects.all().count()
total_applications = Application.objects.all().count()
new_users = handler.get_new_user_counts(
{
"new_users_last_24_hours": timedelta(hours=24),
"new_users_last_7_days": timedelta(days=7),
"new_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
active_users = handler.get_active_user_count(
{
"active_users_last_24_hours": timedelta(hours=24),
"active_users_last_7_days": timedelta(days=7),
"active_users_last_30_days": timedelta(days=30),
},
include_previous=True,
)
new_users_per_day = handler.get_new_user_count_per_day(
timedelta(days=30), now=now
)
active_users_per_day = handler.get_active_user_count_per_day(
timedelta(days=30), now=now
)
serializer = AdminDashboardSerializer(
{
"total_users": total_users,
"total_groups": total_groups,
"total_applications": total_applications,
"new_users_per_day": new_users_per_day,
"active_users_per_day": active_users_per_day,
**new_users,
**active_users,
}
)
return Response(serializer.data)
| 2.390625 | 2 |
src/clientOld.py | dan3612812/socketChatRoom | 0 | 4554 | # -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select
HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")
def socketRecv():
while True:
data = s.recv(1024).decode("utf-8")
print(data)
time.sleep(0.1)
def inputJob():
while True:
data = input()
s.send(bytes(data, "utf-8"))
time.sleep(0.1)
socketThread = threading.Thread(target=socketRecv)
socketThread.daemon = True  # let the process exit even if the receive loop is still blocked in recv()
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()
try:
while True:
data = input()
s.send(bytes(data, "utf-8"))
time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
    print("in except")
    # s.close()  # close the connection
socketThread.do_run = False
# socketThread.join()
# inputThread.join()
print("close thread")
sys.exit(0)
| 3.015625 | 3 |
plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py | lukaszlaszuk/insightconnect-plugins | 46 | 4555 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Import observable(s) into Anomali ThreatStream with approval"
class Input:
FILE = "file"
OBSERVABLE_SETTINGS = "observable_settings"
class Output:
RESULTS = "results"
class ImportObservableInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"$ref": "#/definitions/file",
"title": "File",
"description": "File of data to be imported into Anomali ThreatStream",
"order": 1
},
"observable_settings": {
"$ref": "#/definitions/observable_settings",
"title": "Observable Settings",
"description": "Settings needed for importing an observable that needs approval",
"order": 2
}
},
"required": [
"file"
],
"definitions": {
"file": {
"id": "file",
"type": "object",
"title": "File",
"description": "File Object",
"properties": {
"content": {
"type": "string",
"title": "Content",
"description": "File contents",
"format": "bytes"
},
"filename": {
"type": "string",
"title": "Filename",
"description": "Name of file"
}
}
},
"observable_settings": {
"type": "object",
"title": "observable_settings",
"properties": {
"classification": {
"type": "string",
"title": "Classification",
"description": "Classification of the observable",
"default": "private",
"enum": [
"public",
"private"
],
"order": 4
},
"confidence": {
"type": "integer",
"title": "Confidence",
"description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",
"order": 1
},
"domain_mapping": {
"type": "string",
"title": "Domain Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 8
},
"email_mapping": {
"type": "string",
"title": "Email Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 10
},
"expiration_ts": {
"type": "string",
"title": "Expiration Time Stamp",
"displayType": "date",
"description": "Time stamp of when intelligence will expire on ThreatStream",
"format": "date-time",
"order": 5
},
"ip_mapping": {
"type": "string",
"title": "IP Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 7
},
"md5_mapping": {
"type": "string",
"title": "MD5 Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 11
},
"notes": {
"type": "array",
"title": "Notes",
"description": "Additional details for the observable. This information is displayed in the Tags column of the ThreatStream UI e.g ['note1', 'note2', 'note3']",
"items": {
"type": "string"
},
"order": 6
},
"severity": {
"type": "string",
"title": "Severity",
"description": "Severity you want to assign to the observable when it is imported",
"default": "",
"enum": [
"low",
"medium",
"high",
"very-high",
""
],
"order": 3
},
"source_confidence_weight": {
"type": "integer",
"title": "Source Confidence Weight",
"description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",
"order": 2
},
"threat_type": {
"type": "string",
"title": "Threat Type",
"description": "Type of threat associated with the imported observables",
"order": 13
},
"trustedcircles": {
"type": "array",
"title": "Trusted Circles",
"description": "ID of the trusted circle to which this threat data should be imported. If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",
"items": {
"type": "integer"
},
"order": 12
},
"url_mapping": {
"type": "string",
"title": "URL Mapping",
"description": "Indicator type to assign if a specific type is not associated with an observable",
"order": 9
}
},
"required": [
"classification"
]
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ImportObservableOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"results": {
"$ref": "#/definitions/import_observable_response",
"title": "Results",
"description": "Results from importing observable(s)",
"order": 1
}
},
"definitions": {
"import_observable_response": {
"type": "object",
"title": "import_observable_response",
"properties": {
"import_session_id": {
"type": "string",
"title": "Import Session ID",
"description": "ID for import session",
"order": 3
},
"job_id": {
"type": "string",
"title": "Job ID",
"description": "Job ID",
"order": 1
},
"success": {
"type": "boolean",
"title": "Success",
"description": "If import was successful",
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 2.484375 | 2 |
trove/tests/unittests/quota/test_quota.py | citrix-openstack-build/trove | 0 | 4556 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from mockito import mock, when, unstub, any, verify, never, times
from mock import Mock
from trove.quota.quota import DbQuotaDriver
from trove.quota.models import Resource
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.common import exception
from trove.common import cfg
from trove.quota.quota import run_with_quotas
from trove.quota.quota import QUOTAS
"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""
CONF = cfg.CONF
resources = {
Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),
Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}
FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"
class Run_with_quotasTest(testtools.TestCase):
def setUp(self):
super(Run_with_quotasTest, self).setUp()
self.quota_reserve_orig = QUOTAS.reserve
self.quota_rollback_orig = QUOTAS.rollback
self.quota_commit_orig = QUOTAS.commit
QUOTAS.reserve = Mock()
QUOTAS.rollback = Mock()
QUOTAS.commit = Mock()
def tearDown(self):
super(Run_with_quotasTest, self).tearDown()
QUOTAS.reserve = self.quota_reserve_orig
QUOTAS.rollback = self.quota_rollback_orig
QUOTAS.commit = self.quota_commit_orig
def test_run_with_quotas(self):
f = Mock()
run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.commit.called)
self.assertFalse(QUOTAS.rollback.called)
self.assertTrue(f.called)
def test_run_with_quotas_error(self):
f = Mock(side_effect=Exception())
self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1,
{'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.rollback.called)
self.assertFalse(QUOTAS.commit.called)
self.assertTrue(f.called)
class QuotaControllerTest(testtools.TestCase):
def setUp(self):
super(QuotaControllerTest, self).setUp()
context = mock()
context.is_admin = True
req = mock()
req.environ = mock()
when(req.environ).get(any()).thenReturn(context)
self.req = req
self.controller = QuotaController()
def tearDown(self):
super(QuotaControllerTest, self).tearDown()
unstub()
def test_update_unknown_resource(self):
body = {'quotas': {'unknown_resource': 5}}
self.assertRaises(exception.QuotaResourceUnknown,
self.controller.update, self.req, body,
FAKE_TENANT1, FAKE_TENANT2)
def test_update_resource_no_value(self):
quota = mock(Quota)
when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(quota)
body = {'quotas': {'instances': None}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(quota, never).save()
self.assertEquals(200, result.status)
def test_update_resource_instance(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
body = {'quotas': {'instances': 2}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, times=1).save()
self.assertTrue('instances' in result._data['quotas'])
self.assertEquals(200, result.status)
self.assertEquals(2, result._data['quotas']['instances'])
@testtools.skipIf(not CONF.trove_volume_support,
'Volume support is not enabled')
def test_update_resource_volume(self):
instance_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='instances').thenReturn(instance_quota)
volume_quota = mock(Quota)
when(DatabaseModelBase).find_by(
tenant_id=FAKE_TENANT2,
resource='volumes').thenReturn(volume_quota)
body = {'quotas': {'instances': None, 'volumes': 10}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
verify(instance_quota, never).save()
self.assertFalse('instances' in result._data['quotas'])
verify(volume_quota, times=1).save()
self.assertEquals(200, result.status)
self.assertEquals(10, result._data['quotas']['volumes'])
class DbQuotaDriverTest(testtools.TestCase):
def setUp(self):
super(DbQuotaDriverTest, self).setUp()
self.driver = DbQuotaDriver(resources)
self.orig_Quota_find_all = Quota.find_all
self.orig_QuotaUsage_find_all = QuotaUsage.find_all
self.orig_QuotaUsage_find_by = QuotaUsage.find_by
self.orig_Reservation_create = Reservation.create
self.orig_QuotaUsage_create = QuotaUsage.create
self.orig_QuotaUsage_save = QuotaUsage.save
self.orig_Reservation_save = Reservation.save
self.mock_quota_result = Mock()
self.mock_usage_result = Mock()
Quota.find_all = Mock(return_value=self.mock_quota_result)
QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)
def tearDown(self):
super(DbQuotaDriverTest, self).tearDown()
Quota.find_all = self.orig_Quota_find_all
QuotaUsage.find_all = self.orig_QuotaUsage_find_all
QuotaUsage.find_by = self.orig_QuotaUsage_find_by
Reservation.create = self.orig_Reservation_create
QuotaUsage.create = self.orig_QuotaUsage_create
QuotaUsage.save = self.orig_QuotaUsage_save
Reservation.save = self.orig_Reservation_save
def test_get_defaults(self):
defaults = self.driver.get_defaults(resources)
self.assertEqual(CONF.max_instances_per_user,
defaults[Resource.INSTANCES])
self.assertEqual(CONF.max_volumes_per_user,
defaults[Resource.VOLUMES])
def test_get_quota_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=12)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.INSTANCES, quota.resource)
self.assertEquals(12, quota.hard_limit)
def test_get_quota_by_tenant_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, quota.tenant_id)
self.assertEquals(Resource.VOLUMES, quota.resource)
self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit)
def test_get_all_quotas_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22),
Quota(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
hard_limit=15)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_all_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(CONF.max_instances_per_user,
quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_one_default(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEquals(CONF.max_volumes_per_user,
quotas[Resource.VOLUMES].hard_limit)
def test_get_quota_usage_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=3,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(3, usage.in_use)
self.assertEquals(1, usage.reserved)
def test_get_quota_usage_by_tenant_default(self):
FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(return_value=FAKE_QUOTA)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEquals(FAKE_TENANT1, usage.tenant_id)
self.assertEquals(Resource.VOLUMES, usage.resource)
self.assertEquals(0, usage.in_use)
self.assertEquals(0, usage.reserved)
def test_get_all_quota_usages_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=2,
reserved=1),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(2, usages[Resource.INSTANCES].in_use)
self.assertEquals(1, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(1, usages[Resource.VOLUMES].in_use)
self.assertEquals(1, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_all_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_one_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0)]
NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEquals(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEquals(0, usages[Resource.INSTANCES].in_use)
self.assertEquals(0, usages[Resource.INSTANCES].reserved)
self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEquals(0, usages[Resource.VOLUMES].in_use)
self.assertEquals(0, usages[Resource.VOLUMES].reserved)
def test_reserve(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': 2, 'volumes': 3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(2, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_reserve_resource_unknown(self):
delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
self.assertRaises(exception.QuotaResourceUnknown,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_usage(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 5, 'volumes': 3}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_reserved(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 4, 'volumes': 2}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_but_can_apply_negative_deltas(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=10,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=50,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': -1, 'volumes': -3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
_, kw = Reservation.create.call_args_list[0]
self.assertEquals(1, kw['usage_id'])
self.assertEquals(-1, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
_, kw = Reservation.create.call_args_list[1]
self.assertEquals(2, kw['usage_id'])
self.assertEquals(-3, kw['delta'])
self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
def test_commit(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.commit(FAKE_RESERVATIONS)
self.assertEqual(6, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[0].status)
self.assertEqual(3, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[1].status)
def test_rollback(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.rollback(FAKE_RESERVATIONS)
self.assertEqual(5, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[0].status)
self.assertEqual(1, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[1].status)
| 1.914063 | 2 |
analisador_sintatico/blueprints/api/parsers.py | viniciusandd/uri-analisador-sintatico | 0 | 4557 | from flask_restful import reqparse
def retornar_parser():
parser = reqparse.RequestParser()
parser.add_argument('sentenca', type=str, required=True)
return parser
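# Hypothetical usage sketch (not part of the original module) -- the parser is
# typically consumed inside a Flask-RESTful resource; the resource name below
# is an assumption:
#
#     from flask_restful import Resource
#
#     class Analise(Resource):
#         def post(self):
#             args = retornar_parser().parse_args()
#             return {'sentenca': args['sentenca']}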
| 2.25 | 2 |
demo_large_image.py | gunlyungyou/AerialDetection | 9 | 4558 | from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인',
'다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로')
CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train',
'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout')
CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)}
def py_cpu_nms_poly_fast_np(dets, thresh):
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
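        # Leftover debug guard: ovr is never populated above, so ovr[0] always raises
        # IndexError and the bare except silently swallows it (effectively a no-op).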
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
# init RoITransformer
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
self.classnames = self.dataset.CLASSES
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
def inference_single(self, imagname, slide_size, chip_size):
img = mmcv.imread(imagname)
height, width, channel = img.shape
slide_h, slide_w = slide_size
hn, wn = chip_size
# TODO: check the corner case
# import pdb; pdb.set_trace()
total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]
for i in tqdm(range(int(width / slide_w + 1))):
for j in range(int(height / slide_h) + 1):
subimg = np.zeros((hn, wn, channel))
# print('i: ', i, 'j: ', j)
chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3]
subimg[:chip.shape[0], :chip.shape[1], :] = chip
chip_detections = inference_detector(self.model, subimg)
# print('result: ', result)
for cls_id, name in enumerate(self.classnames):
chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w
chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h
# import pdb;pdb.set_trace()
try:
total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id]))
except:
import pdb; pdb.set_trace()
# nms
for i in range(len(self.classnames)):
keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)
total_detections[i] = total_detections[i][keep]
return total_detections
def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):
detections = self.inference_single(srcpath, slide_size, chip_size)
classnames = [cls if cls not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames]
img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
#roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',
# r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth')
#roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py',
# r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth')
roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py',
r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth')
from glob import glob
roksis = glob('data/roksi2020/val/images/*.png')
#target = roksis[1]
#out = target.split('/')[-1][:-4]+'_out.jpg'
#roitransformer.inference_single_vis(target,
# os.path.join('demo', out),
# (512, 512),
# (1024, 1024))
for target in roksis[:100]:
out = target.split('/')[-1][:-4]+'_out.jpg'
print(os.path.join('demo/fasterrcnn', out))
roitransformer.inference_single_vis(target,
os.path.join('demo/fasterrcnn', out),
(512, 512),
(1024, 1024))
#roitransformer.inference_single_vis(r'demo/P0009.jpg',
# r'demo/P0009_out.jpg',
# (512, 512),
# (1024, 1024))
| 2.046875 | 2 |
ImageSearcher/admin.py | carpensa/dicom-harpooner | 1 | 4559 | from django.contrib import admin
from dicoms.models import Subject
from dicoms.models import Session
from dicoms.models import Series
admin.site.register(Session)
admin.site.register(Subject)
admin.site.register(Series)
| 1.132813 | 1 |
src/djangoreactredux/wsgi.py | noscripter/django-react-redux-jwt-base | 4 | 4560 | """
WSGI config for django-react-redux-jwt-base project.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoreactredux.settings.dev")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 1.296875 | 1 |
simple_settings/dynamic_settings/base.py | matthewh/simple-settings | 0 | 4561 | # -*- coding: utf-8 -*-
import re
from copy import deepcopy
import jsonpickle
class BaseReader(object):
"""
Base class for dynamic readers
"""
_default_conf = {}
def __init__(self, conf):
self.conf = deepcopy(self._default_conf)
self.conf.update(conf)
self.key_pattern = self.conf.get('pattern')
self.auto_casting = self.conf.get('auto_casting')
self.key_prefix = self.conf.get('prefix')
def get(self, key):
if not self._is_valid_key(key):
return
result = self._get(self._qualified_key(key))
if self.auto_casting and (result is not None):
result = jsonpickle.decode(result)
return result
def set(self, key, value):
if not self._is_valid_key(key):
return
if self.auto_casting:
value = jsonpickle.encode(value)
self._set(self._qualified_key(key), value)
def _is_valid_key(self, key):
if not self.key_pattern:
return True
return bool(re.match(self.key_pattern, key))
def _qualified_key(self, key):
"""
Prepends the configured prefix to the key (if applicable).
:param key: The unprefixed key.
:return: The key with any configured prefix prepended.
"""
pfx = self.key_prefix if self.key_prefix is not None else ''
return '{}{}'.format(pfx, key)
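# Minimal sketch of a concrete reader (illustrative only, not part of this
# module): subclasses are expected to provide _get/_set against a real backend
# (e.g. Redis or Consul); here a plain dict stands in for the store.
#
#     class DictReader(BaseReader):
#         _default_conf = {'auto_casting': True}
#
#         def __init__(self, conf):
#             super(DictReader, self).__init__(conf)
#             self._store = {}
#
#         def _get(self, key):
#             return self._store.get(key)
#
#         def _set(self, key, value):
#             self._store[key] = value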
| 2.765625 | 3 |
scripts/map_frame_to_utm_tf_publisher.py | coincar-sim/lanelet2_interface_ros | 7 | 4562 | #!/usr/bin/env python
#
# Copyright (c) 2018
# FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de)
# KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import roslib
import rospy
import tf
import tf2_ros
import geometry_msgs.msg
import lanelet2
stb = None
static_transform = None
lat_origin = None
lon_origin = None
map_frame_id = None
actual_utm_with_no_offset_frame_id = None
def timer_callback(event):
global stb, static_transform
static_transform.header.stamp = rospy.Time.now()
stb.sendTransform(static_transform)
def wait_for_params_successful():
global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id
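    # Poll the parameter server for up to ~30 s (3000 attempts x 0.01 s sleep)
    # until the lanelet2_interface_ros parameters become available.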
for i in range(3000):
try:
lat_origin = float(rospy.get_param("/lanelet2_interface_ros/lat_origin"))
lon_origin = float(rospy.get_param("/lanelet2_interface_ros/lon_origin"))
map_frame_id = rospy.get_param("/lanelet2_interface_ros/map_frame_id")
actual_utm_with_no_offset_frame_id = rospy.get_param(
"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id")
except Exception:
rospy.sleep(0.01)
continue
return True
return False
if __name__ == '__main__':
rospy.init_node('map_frame_to_utm_tf_publisher')
if not wait_for_params_successful():
rospy.logerr("map_frame_to_utm_tf_publisher: Could not initialize")
exit()
origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin)
projector = lanelet2.projection.UtmProjector(
lanelet2.io.Origin(origin_latlon), False, False)
origin_xy = projector.forward(origin_latlon)
stb = tf2_ros.TransformBroadcaster()
static_transform = geometry_msgs.msg.TransformStamped()
static_transform.header.stamp = rospy.Time.now()
static_transform.header.frame_id = map_frame_id
static_transform.child_frame_id = actual_utm_with_no_offset_frame_id
static_transform.transform.translation.x = -origin_xy.x
static_transform.transform.translation.y = -origin_xy.y
static_transform.transform.translation.z = 0.0
q = tf.transformations.quaternion_from_euler(0, 0, 0)
static_transform.transform.rotation.x = q[0]
static_transform.transform.rotation.y = q[1]
static_transform.transform.rotation.z = q[2]
static_transform.transform.rotation.w = q[3]
rospy.Timer(rospy.Duration(1.), timer_callback)
rospy.spin()
| 1.40625 | 1 |
lectures/05-python-intro/examples/argv.py | mattmiller899/biosys-analytics | 14 | 4563 | #!/usr/bin/env python3
import sys
print(sys.argv)
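# Example invocation (assuming the script is saved as argv.py and made executable):
#   $ ./argv.py foo bar
#   ['./argv.py', 'foo', 'bar']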
| 1.515625 | 2 |
tests/fixtures.py | easyas314159/cnftools | 0 | 4564 | from itertools import chain
def make_comparable(*clauses):
return set((frozenset(c) for c in chain(*clauses)))
def count_clauses(*clauses):
total = 0
for subclauses in clauses:
total += len(subclauses)
return total
def unique_literals(*clauses):
literals = set()
for clause in chain(*clauses):
literals.update((abs(l) for l in clause))
return literals
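# Illustrative behaviour of the helpers above (not part of the original file):
#
#     make_comparable([[1, -2]], [[2, 3]]) == {frozenset({1, -2}), frozenset({2, 3})}
#     count_clauses([[1, -2]], [[2, 3]]) == 2
#     unique_literals([[1, -2]], [[2, 3]]) == {1, 2, 3}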
| 3.078125 | 3 |
applications/FluidDynamicsApplication/tests/sod_shock_tube_test.py | Rodrigo-Flo/Kratos | 0 | 4565 | # Import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.kratos_utilities as KratosUtilities
from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis
class SodShockTubeTest(KratosUnittest.TestCase):
def testSodShockTubeExplicitASGS(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = False
self.shock_capturing = False
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitASGSShockCapturing(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = False
self.shock_capturing = True
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitOSS(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = True
self.shock_capturing = False
self._CustomizeSimulationSettings()
def testSodShockTubeExplicitOSSShockCapturing(self):
self.solver_type = "CompressibleExplicit"
self.use_oss = True
self.shock_capturing = True
self._CustomizeSimulationSettings()
def setUp(self):
self.print_output = False
self.print_reference_values = False
self.check_absolute_tolerance = 1.0e-8
self.check_relative_tolerance = 1.0e-10
self.work_folder = "sod_shock_tube_test"
settings_filename = "ProjectParameters.json"
# Read the simulation settings
with KratosUnittest.WorkFolderScope(self.work_folder,__file__):
with open(settings_filename,'r') as parameter_file:
self.parameters = KratosMultiphysics.Parameters(parameter_file.read())
def runTest(self):
# If required, add the output process to the test settings
if self.print_output:
self._AddOutput()
# If required, add the reference values output process to the test settings
if self.print_reference_values:
self._AddReferenceValuesOutput()
else:
self._AddReferenceValuesCheck()
# Create the test simulation
with KratosUnittest.WorkFolderScope(self.work_folder,__file__):
self.model = KratosMultiphysics.Model()
simulation = FluidDynamicsAnalysis(self.model, self.parameters)
simulation.Run()
def tearDown(self):
with KratosUnittest.WorkFolderScope(self.work_folder, __file__):
KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time')
def _CustomizeSimulationSettings(self):
# Customize simulation settings
self.parameters["solver_settings"]["solver_type"].SetString(self.solver_type)
self.parameters["solver_settings"]["use_oss"].SetBool(self.use_oss)
self.parameters["solver_settings"]["shock_capturing"].SetBool(self.shock_capturing)
def _AddOutput(self):
gid_output_settings = KratosMultiphysics.Parameters("""{
"python_module" : "gid_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "GiDOutputProcess",
"help" : "This process writes postprocessing files for GiD",
"Parameters" : {
"model_part_name" : "FluidModelPart",
"output_name" : "TO_BE_DEFINED",
"postprocess_parameters" : {
"result_file_configuration" : {
"gidpost_flags" : {
"GiDPostMode" : "GiD_PostBinary",
"WriteDeformedMeshFlag" : "WriteDeformed",
"WriteConditionsFlag" : "WriteConditions",
"MultiFileFlag" : "SingleFile"
},
"file_label" : "step",
"output_control_type" : "step",
"output_frequency" : 1.0,
"body_output" : true,
"node_output" : false,
"skin_output" : false,
"plane_output" : [],
"nodal_results" : ["DENSITY","MOMENTUM","TOTAL_ENERGY"],
"gauss_point_results" : ["SHOCK_SENSOR","THERMAL_SENSOR","SHEAR_SENSOR"],
"nodal_nonhistorical_results" : ["ARTIFICIAL_BULK_VISCOSITY","ARTIFICIAL_CONDUCTIVITY","ARTIFICIAL_DYNAMIC_VISCOSITY"]
},
"point_data_configuration" : []
}
}
}""")
output_name = "sod_shock_tube{0}{1}{2}".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
gid_output_settings["Parameters"]["output_name"].SetString(output_name)
self.parameters["output_processes"]["gid_output"].Append(gid_output_settings)
def _AddReferenceValuesOutput(self):
json_output_settings = KratosMultiphysics.Parameters("""{
"python_module" : "json_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "JsonOutputProcess",
"Parameters" : {
"output_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],
"output_file_name" : "TO_BE_DEFINED",
"model_part_name" : "FluidModelPart.FluidParts_Fluid",
"time_frequency" : 0.025
}
}""")
output_file_name = "sod_shock_tube{0}{1}{2}_results.json".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
json_output_settings["Parameters"]["output_file_name"].SetString(output_file_name)
self.parameters["processes"]["json_check_process_list"].Append(json_output_settings)
def _AddReferenceValuesCheck(self):
json_check_settings = KratosMultiphysics.Parameters("""{
"python_module" : "from_json_check_result_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "FromJsonCheckResultProcess",
"Parameters" : {
"check_variables" : ["DENSITY","MOMENTUM_X","MOMENTUM_Y","TOTAL_ENERGY"],
"input_file_name" : "TO_BE_DEFINED",
"model_part_name" : "FluidModelPart.FluidParts_Fluid",
"tolerance" : 0.0,
"relative_tolerance" : 0.0,
"time_frequency" : 0.025
}
}""")
input_file_name = "sod_shock_tube{0}{1}{2}_results.json".format(
"_explicit" if self.solver_type == "CompressibleExplicit" else "_implicit",
"_ASGS" if self.use_oss == False else "_OSS",
"_SC" if self.shock_capturing else "")
json_check_settings["Parameters"]["input_file_name"].SetString(input_file_name)
json_check_settings["Parameters"]["tolerance"].SetDouble(self.check_absolute_tolerance)
json_check_settings["Parameters"]["relative_tolerance"].SetDouble(self.check_relative_tolerance)
self.parameters["processes"]["json_check_process_list"].Append(json_check_settings)
if __name__ == '__main__':
test = SodShockTubeTest()
test.setUp()
# test.testSodShockTubeExplicitASGS()
test.testSodShockTubeExplicitASGSShockCapturing()
# test.testSodShockTubeExplicitOSS()
# test.testSodShockTubeExplicitOSSShockCapturing()
test.runTest()
test.tearDown()
| 1.929688 | 2 |
src/controllers/__init__.py | TonghanWang/NDQ | 63 | 4566 | from .basic_controller import BasicMAC
from .cate_broadcast_comm_controller import CateBCommMAC
from .cate_broadcast_comm_controller_full import CateBCommFMAC
from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC
from .tar_comm_controller import TarCommMAC
from .cate_pruned_broadcast_comm_controller import CatePBCommMAC
REGISTRY = {"basic_mac": BasicMAC,
"cate_broadcast_comm_mac": CateBCommMAC,
"cate_broadcast_comm_mac_full": CateBCommFMAC,
"cate_broadcast_comm_mac_not_IB": CateBCommNIBMAC,
"tar_comm_mac": TarCommMAC,
"cate_pruned_broadcast_comm_mac": CatePBCommMAC}
| 1.3125 | 1 |
main.py | 1999foxes/run-cmd-from-websocket | 0 | 4567 | import asyncio
import json
import logging
import websockets
logging.basicConfig()
USERS = set()
async def counter(websocket, path):
    # Track connected clients so they can be removed when the connection closes.
    USERS.add(websocket)
    try:
        print("connect")
        async for message in websocket:
            print(message)
    finally:
        USERS.remove(websocket)
async def main():
async with websockets.serve(counter, "localhost", 5000):
await asyncio.Future() # run forever
if __name__ == "__main__":
asyncio.run(main())
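# Hypothetical client sketch (not part of this script), assuming the server above
# is running locally on port 5000:
#
#     import asyncio
#     import websockets
#
#     async def send(message):
#         async with websockets.connect("ws://localhost:5000") as ws:
#             await ws.send(message)
#
#     asyncio.run(send("hello"))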
| 2.796875 | 3 |
3d_Vnet/3dvnet.py | GingerSpacetail/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks | 100 | 4568 | import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import tensorflow as tf
import keras.backend as K
from keras.utils import to_categorical
from keras import metrics
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum
from keras.layers.core import Lambda, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D
from keras.layers.merge import concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from sklearn.utils import class_weight
from keras.callbacks import ModelCheckpoint
from keras.callbacks import CSVLogger
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import PReLU
import os
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
# from medpy.io import load
import numpy as np
#import cv2
import nibabel as nib
from PIL import Image
def conv_block(input_mat,num_filters,kernel_size,batch_norm):
X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat)
if batch_norm:
X = BatchNormalization()(X)
X = Activation('relu')(X)
X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X)
if batch_norm:
X = BatchNormalization()(X)
X = Activation('relu')(X)
X = add([input_mat,X]);
return X
def Vnet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm = True):
#c1 = conv_block(input_img,n_filters,3,batch_norm)
c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1) , padding='same')(input_img)
#c1 = add([c1,input_img])
c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2) , padding = 'same' )(c1)
c3 = conv_block(c2 , n_filters*2,5,True)
p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2), padding = 'same')(c3)
p3 = Dropout(dropout)(p3)
c4 = conv_block(p3, n_filters*4,5,True)
p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c4)
p4 = Dropout(dropout)(p4)
c5 = conv_block(p4, n_filters*8,5,True)
p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c5)
p6 = Dropout(dropout)(p6)
#c6 = conv_block(p5, n_filters*8,5,True)
#p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c6)
p7 = conv_block(p6,n_filters*16,5,True)
u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7);
u6 = concatenate([u6,c5]);
c7 = conv_block(u6,n_filters*16,5,True)
c7 = Dropout(dropout)(c7)
u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7);
u8 = concatenate([u7,c4]);
c8 = conv_block(u8,n_filters*8,5,True)
c8 = Dropout(dropout)(c8)
u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8);
u9 = concatenate([u9,c3]);
c9 = conv_block(u9,n_filters*4,5,True)
c9 = Dropout(dropout)(c9)
u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9);
u10 = concatenate([u10,c1]);
c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding = 'same')(u10);
c10 = Dropout(dropout)(c10)
c10 = add([c10,u10]);
#c9 = conv_block(u9,n_filters,3,batch_norm)
outputs = Conv3D(4, (1,1,1), activation='softmax')(c10)
model = Model(inputs=input_img, outputs=outputs)
return model
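# Hypothetical build sketch (not part of the original file); the patch shape and
# hyper-parameters are illustrative assumptions (e.g. 128^3 patches with 4 MRI
# modalities -- spatial dims must be divisible by 16 for the four downsamplings):
#
#     input_img = Input((128, 128, 128, 4))
#     model = Vnet_3d(input_img, n_filters=8, dropout=0.2, batch_norm=True)
#     model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy')
#     model.summary()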
| 2.140625 | 2 |
vk/types/additional/active_offer.py | Inzilkin/vk.py | 24 | 4569 | from ..base import BaseModel
# returned from https://vk.com/dev/account.getActiveOffers
class ActiveOffer(BaseModel):
id: str = None
title: str = None
instruction: str = None
instruction_html: str = None
short_description: str = None
description: str = None
img: str = None
tag: str = None
price: int = None
| 1.703125 | 2 |
lib/networks/Resnet50_train.py | yangxue0827/TF_Deformable_Net | 193 | 4570 | # --------------------------------------------------------
# TFFRCNN - Resnet50
# Copyright (c) 2016
# Licensed under The MIT License [see LICENSE for details]
# Written by miraclebiu
# --------------------------------------------------------
import tensorflow as tf
from .network import Network
from ..fast_rcnn.config import cfg
class Resnet50_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')
self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\
'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas})
self.trainable = trainable
self.setup()
def setup(self):
n_classes = cfg.NCLASSES
# anchor_scales = [8, 16, 32]
anchor_scales = cfg.ANCHOR_SCALES
_feat_stride = [16, ]
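        # 16 is the total backbone stride at res4f (2 * 2 * 2 * 2), i.e. the spacing,
        # in input pixels, between anchor centres used by the RPN.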
(self.feed('data')
.conv(7, 7, 64, 2, 2, relu=False, name='conv1')
.batch_normalization(relu=True, name='bn_conv1', is_training=False)
.max_pool(3, 3, 2, 2, padding='VALID',name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(relu=True, name='bn2a_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(name='bn2a_branch2c',is_training=False,relu=False))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')
.batch_normalization(name='bn3a_branch1',is_training=False,relu=False))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
.batch_normalization(relu=True, name='bn3b_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
.batch_normalization(relu=True, name='bn3b_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
.batch_normalization(name='bn3b_branch2c',is_training=False,relu=False))
(self.feed('res3a_relu',
'bn3b_branch2c')
.add(name='res3b')
.relu(name='res3b_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
.batch_normalization(relu=True, name='bn3c_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
.batch_normalization(relu=True, name='bn3c_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
.batch_normalization(name='bn3c_branch2c',is_training=False,relu=False))
(self.feed('res3b_relu',
'bn3c_branch2c')
.add(name='res3c')
.relu(name='res3c_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
.batch_normalization(relu=True, name='bn3d_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
.batch_normalization(relu=True, name='bn3d_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
.batch_normalization(name='bn3d_branch2c',is_training=False,relu=False))
(self.feed('res3c_relu',
'bn3d_branch2c')
.add(name='res3d')
.relu(name='res3d_relu')
.conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False))
(self.feed('res3d_relu')
.conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
.batch_normalization(relu=True, name='bn4b_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
.batch_normalization(relu=True, name='bn4b_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
.batch_normalization(name='bn4b_branch2c',is_training=False,relu=False))
(self.feed('res4a_relu',
'bn4b_branch2c')
.add(name='res4b')
.relu(name='res4b_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
.batch_normalization(relu=True, name='bn4c_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
.batch_normalization(relu=True, name='bn4c_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
.batch_normalization(name='bn4c_branch2c',is_training=False,relu=False))
(self.feed('res4b_relu',
'bn4c_branch2c')
.add(name='res4c')
.relu(name='res4c_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
.batch_normalization(relu=True, name='bn4d_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
.batch_normalization(relu=True, name='bn4d_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False))
(self.feed('res4c_relu',
'bn4d_branch2c')
.add(name='res4d')
.relu(name='res4d_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
.batch_normalization(relu=True, name='bn4e_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
.batch_normalization(relu=True, name='bn4e_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
.batch_normalization(name='bn4e_branch2c',is_training=False,relu=False))
(self.feed('res4d_relu',
'bn4e_branch2c')
.add(name='res4e')
.relu(name='res4e_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
.batch_normalization(relu=True, name='bn4f_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
.batch_normalization(relu=True, name='bn4f_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
.batch_normalization(name='bn4f_branch2c',is_training=False,relu=False))
(self.feed('res4e_relu',
'bn4f_branch2c')
.add(name='res4f')
.relu(name='res4f_relu'))
#========= RPN ============
(self.feed('res4f_relu')
.conv(3,3,512,1,1,name='rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))
(self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info')
.anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))
# Loss of rpn_cls & rpn_boxes
(self.feed('rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
(self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas')
.proposal_target_layer(n_classes,name = 'roi-data'))
#========= RCNN ============
(self.feed('res4f_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch1'))
(self.feed('res4f_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch2a')
.relu(name='res5a_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros'))
(self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b')
.batch_normalization(relu=False, name='bn5a_branch2b')
.relu(name='res5a_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5a_branch2c'))
(self.feed('bn5a_branch1', 'bn5a_branch2c')
.add(name='res5a')
.relu(name='res5a_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5b_branch2a')
.relu(name='res5b_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros'))
(self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b')
.batch_normalization(relu=False, name='bn5b_branch2b')
.relu(name='res5b_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5b_branch2c'))
(self.feed('res5a_relu', 'bn5b_branch2c')
.add(name='res5b')
.relu(name='res5b_relu')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID')
.batch_normalization(relu=False, name='bn5c_branch2a')
.relu(name='res5c_branch2a_relu')
.conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') )
(self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset')
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b')
.batch_normalization(relu=False, name='bn5c_branch2b')
.relu(name='res5c_branch2b_relu')
.conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID')
.batch_normalization(relu=False, name='bn5c_branch2c'))
(self.feed('res5b_relu', 'bn5c_branch2c')
.add(name='res5c')
.relu(name='res5c_relu')
.conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1')
.relu(name='conv_new_1_relu'))
(self.feed('conv_new_1_relu', 'roi-data')
.deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t')
# .flatten_data(name='offset_flatten')
.fc(num_out=7 * 7 * 2, name='offset', relu=False)
.reshape(shape=(-1,2,7,7), name='offset_reshape'))
(self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape')
.deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool')
.fc(num_out=1024, name='fc_new_1')
.fc(num_out=1024, name='fc_new_2'))
(self.feed('fc_new_2')
.fc(num_out=n_classes, name='cls_score', relu=False)
.softmax(name='cls_prob'))
(self.feed('fc_new_2')
.fc(num_out=4*n_classes, name='bbox_pred', relu=False))
# (self.feed('res4f_relu','roi-data')
# .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling')
# .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID')
# .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')
# .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')
# .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False))
# (self.feed('res5a_branch2a_roipooling')
# .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID')
# .batch_normalization(name='bn5a_branch1',is_training=False,relu=False))
# (self.feed('bn5a_branch2c','bn5a_branch1')
# .add(name='res5a')
# .relu(name='res5a_relu')
# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')
# .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')
# .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')
# .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False))
# #pdb.set_trace()
# (self.feed('res5a_relu',
# 'bn5b_branch2c')
# .add(name='res5b')
# .relu(name='res5b_relu')
# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')
# .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False)
# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')
# .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False)
# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')
# .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False))
# #pdb.set_trace()
# (self.feed('res5b_relu',
# 'bn5c_branch2c')
# .add(name='res5c')
# .relu(name='res5c_relu')
# .fc(n_classes, relu=False, name='cls_score')
# .softmax(name='cls_prob'))
# (self.feed('res5c_relu')
# .fc(n_classes*4, relu=False, name='bbox_pred'))
| 2.34375 | 2 |
lib/aws_sso_lib/assignments.py | vdesjardins/aws-sso-util | 330 | 4571 | import re
import numbers
import collections
import logging
from collections.abc import Iterable
import itertools
import aws_error_utils
from .lookup import Ids, lookup_accounts_for_ou
from .format import format_account_id
LOGGER = logging.getLogger(__name__)
_Context = collections.namedtuple("_Context", [
"session",
"ids",
"principal",
"principal_filter",
"permission_set",
"permission_set_filter",
"target",
"target_filter",
"get_principal_names",
"get_permission_set_names",
"get_target_names",
"ou_recursive",
"cache",
"filter_cache"
])
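# Memoizes the result of a user-supplied filter callable per key so that each
# principal, permission set, or target is evaluated at most once.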
def _filter(filter_cache, key, func, args):
if not func:
return True
if key not in filter_cache:
filter_cache[key] = func(*args)
return filter_cache[key]
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
def _is_principal_tuple(principal):
try:
return all([
len(principal) == 2,
isinstance(principal[0], str),
principal[0] in ["GROUP", "USER"],
isinstance(principal[1], str),
])
except:
return False
def _process_principal(principal):
if not principal:
return None
if isinstance(principal, str):
return [(None, principal)]
if _is_principal_tuple(principal):
return [tuple(principal)]
else:
return _flatten(_process_principal(p) for p in principal)
def _process_permission_set(ids, permission_set):
if not permission_set:
return None
if not isinstance(permission_set, str) and isinstance(permission_set, Iterable):
return _flatten(_process_permission_set(ids, ps) for ps in permission_set)
if permission_set.startswith("arn"):
permission_set_arn = permission_set
elif permission_set.startswith("ssoins-") or permission_set.startswith("ins-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{permission_set}"
elif permission_set.startswith("ps-"):
permission_set_arn = f"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}"
else:
raise TypeError(f"Invalid permission set id {permission_set}")
return [permission_set_arn]
def _is_target_tuple(target):
try:
return all([
len(target) == 2,
isinstance(target[0], str),
target[0] in ["AWS_OU", "AWS_ACCOUNT"],
isinstance(target[1], str),
])
except:
return False
def _process_target(target):
if not target:
return None
if isinstance(target, numbers.Number):
return [("AWS_ACCOUNT", format_account_id(target))]
if isinstance(target, str):
if re.match(r"^\d+$", target):
return [("AWS_ACCOUNT", format_account_id(target))]
elif re.match(r"^r-[a-z0-9]{4,32}$", target) or re.match(r"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$", target):
return [("AWS_OU", target)]
else:
raise TypeError(f"Invalid target {target}")
elif _is_target_tuple(target):
target_type, target_id = target
if target_type not in ["AWS_ACCOUNT", "AWS_OU"]:
raise TypeError(f"Invalid target type {target_type}")
return [(target_type, target_id)]
else:
value = _flatten(_process_target(t) for t in target)
return value
def _get_account_iterator(target, context: _Context):
def target_iterator():
target_name = None
if context.get_target_names:
organizations_client = context.session.client("organizations")
account = organizations_client.describe_account(AccountId=target[1])["Account"]
if account.get("Name"):
target_name = account["Name"]
value = (*target, target_name)
if not _filter(context.filter_cache, value[1], context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
else:
LOGGER.debug(f"Visiting single account: {value}")
yield value
return target_iterator
def _get_ou_iterator(target, context: _Context):
def target_iterator():
target_name = None
# if context.get_target_names:
# organizations_client = context.session.client("organizations")
# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])["OrganizationalUnit"]
# if ou.get("Name"):
# target_name = ou("Name")
value = (*target, target_name)
accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive)
for account in accounts:
yield "AWS_ACCOUNT", account["Id"], account["Name"]
return target_iterator
def _get_single_target_iterator(target, context: _Context):
target_type = target[0]
if target_type == "AWS_ACCOUNT":
return _get_account_iterator(target, context)
elif target_type == "AWS_OU":
return _get_ou_iterator(target, context)
else:
raise TypeError(f"Invalid target type {target_type}")
def _get_all_accounts_iterator(context: _Context):
def target_iterator():
organizations_client = context.session.client("organizations")
accounts_paginator = organizations_client.get_paginator("list_accounts")
for response in accounts_paginator.paginate():
LOGGER.debug(f"ListAccounts page: {response}")
for account in response["Accounts"]:
account_id = account["Id"]
account_name = account["Name"]
value = ("AWS_ACCOUNT", account_id, account_name)
if not _filter(context.filter_cache, account_id, context.target_filter, value):
LOGGER.debug(f"Account is filtered: {value}")
continue
LOGGER.debug(f"Visiting account: {value}")
yield value
return target_iterator
def _get_target_iterator(context: _Context):
if context.target:
iterables = [_get_single_target_iterator(t, context) for t in context.target]
def target_iterator():
return itertools.chain(*[it() for it in iterables])
return target_iterator
else:
LOGGER.debug(f"Iterating for all accounts")
return _get_all_accounts_iterator(context)
def _get_single_permission_set_iterator(permission_set, context: _Context):
permission_set_arn = permission_set
permission_set_id = permission_set_arn.split("/")[-1]
def permission_set_iterator(target_type, target_id, target_name):
if not context.get_permission_set_names:
permission_set_name = None
else:
sso_admin_client = context.session.client("sso-admin")
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
permission_set_name = response["PermissionSet"]["Name"]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Single permission set is filtered: {(permission_set_id, permission_set_name)}")
else:
LOGGER.debug(f"Visiting single permission set {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_all_permission_sets_iterator(context: _Context):
def permission_set_iterator(target_type, target_id, target_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
permission_sets_paginator = sso_admin_client.get_paginator("list_permission_sets_provisioned_to_account")
for response in permission_sets_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id):
LOGGER.debug(f"ListPermissionSetsProvisionedToAccount {target_id} page: {response}")
if "PermissionSets" not in response:
continue
for permission_set_arn in response["PermissionSets"]:
permission_set_id = permission_set_arn.split("/", 2)[-1]
if not context.get_permission_set_names:
permission_set_name = None
else:
if permission_set_arn not in context.cache:
response = sso_admin_client.describe_permission_set(
InstanceArn=context.ids.instance_arn,
PermissionSetArn=permission_set_arn
)
LOGGER.debug(f"DescribePermissionSet response: {response}")
context.cache[permission_set_arn] = response["PermissionSet"]["Name"]
permission_set_name = context.cache[permission_set_arn]
if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):
LOGGER.debug(f"Permission set is filtered: {(permission_set_id, permission_set_name)}")
continue
LOGGER.debug(f"Visiting permission set: {(permission_set_id, permission_set_name)}")
yield permission_set_arn, permission_set_id, permission_set_name
return permission_set_iterator
def _get_permission_set_iterator(context: _Context):
if context.permission_set:
iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]
def permission_set_iterator(target_type, target_id, target_name):
return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables])
return permission_set_iterator
else:
LOGGER.debug("Iterating for all permission sets")
return _get_all_permission_sets_iterator(context)
def _get_principal_iterator(context: _Context):
def principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
if target_type != "AWS_ACCOUNT":
raise TypeError(f"Unsupported target type {target_type}")
sso_admin_client = context.session.client("sso-admin")
identity_store_client = context.session.client("identitystore")
assignments_paginator = sso_admin_client.get_paginator("list_account_assignments")
for response in assignments_paginator.paginate(
InstanceArn=context.ids.instance_arn,
AccountId=target_id,
PermissionSetArn=permission_set_arn):
LOGGER.debug(f"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}")
if not response["AccountAssignments"] and "NextToken" not in response:
LOGGER.debug(f"No assignments for {target_id} {permission_set_arn.split('/')[-1]}")
for assignment in response["AccountAssignments"]:
principal_type = assignment["PrincipalType"]
principal_id = assignment["PrincipalId"]
LOGGER.debug(f"Visiting principal {principal_type}:{principal_id}")
if context.principal:
for principal in context.principal:
type_matches = (principal[0] is None or principal[0] == principal_type) # a missing type acts as a wildcard; otherwise the types must be equal
if type_matches and principal[1] == principal_id:
LOGGER.debug(f"Found principal {principal_type}:{principal_id}")
break
else:
LOGGER.debug(f"Principal {principal_type}:{principal_id} does not match principals")
continue
principal_key = (principal_type, principal_id)
if not context.get_principal_names:
principal_name = None
else:
if principal_key not in context.cache:
if principal_type == "GROUP":
try:
response = identity_store_client.describe_group(
IdentityStoreId=context.ids.identity_store_id,
GroupId=principal_id
)
LOGGER.debug(f"DescribeGroup response: {response}")
context.cache[principal_key] = response["DisplayName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
elif principal_type == "USER":
try:
response = identity_store_client.describe_user(
IdentityStoreId=context.ids.identity_store_id,
UserId=principal_id
)
LOGGER.debug(f"DescribeUser response: {response}")
context.cache[principal_key] = response["UserName"]
except aws_error_utils.catch_aws_error("ResourceNotFoundException"):
context.cache[principal_key] = None
else:
raise ValueError(f"Unknown principal type {principal_type}")
principal_name = context.cache[principal_key]
if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)):
LOGGER.debug(f"Principal is filtered: {principal_type}:{principal_id}")
continue
LOGGER.debug(f"Visiting principal: {principal_type}:{principal_id}")
yield principal_type, principal_id, principal_name
return principal_iterator
Assignment = collections.namedtuple("Assignment", [
"instance_arn",
"principal_type",
"principal_id",
"principal_name",
"permission_set_arn",
"permission_set_name",
"target_type",
"target_id",
"target_name",
])
def list_assignments(
session,
instance_arn=None,
identity_store_id=None,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
"""Iterate over AWS SSO assignments.
Args:
session (boto3.Session): boto3 session to use
instance_arn (str): The SSO instance to use, or it will be looked up using ListInstances
identity_store_id (str): The identity store to use if principal names are being retrieved
or it will be looked up using ListInstances
principal: A principal specification or list of principal specifications.
A principal specification is a principal id or a 2-tuple of principal type and id.
principal_filter: A callable taking principal type, principal id, and principal name
(which may be None), and returning True if the principal should be included.
permission_set: A permission set arn or id, or a list of the same.
permission_set_filter: A callable taking permission set arn and name (name may be None),
returning True if the permission set should be included.
target: A target specification or list of target specifications.
A target specification is an account or OU id, or a 2-tuple of target type, which
is either AWS_ACCOUNT or AWS_OU, and target id.
target_filter: A callable taking target type, target id, and target name
(which may be None), and returning True if the target should be included.
get_principal_names (bool): Retrieve names for principals in assignments.
get_permission_set_names (bool): Retrieve names for permission sets in assignments.
get_target_names (bool): Retrieve names for targets in assignments.
ou_recursive (bool): Set to True if an OU is provided as a target to get all accounts
including those in child OUs.
Returns:
An iterator over Assignment namedtuples
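Example (illustrative only; the account id is hypothetical):
    import boto3
    session = boto3.Session()
    for assignment in list_assignments(
            session,
            target="123456789012",
            get_principal_names=True):
        print(assignment.principal_name, assignment.permission_set_arn)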
"""
ids = Ids(lambda: session, instance_arn, identity_store_id)
return _list_assignments(
session,
ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
)
def _list_assignments(
session,
ids,
principal=None,
principal_filter=None,
permission_set=None,
permission_set_filter=None,
target=None,
target_filter=None,
get_principal_names=False,
get_permission_set_names=False,
get_target_names=False,
ou_recursive=False):
principal = _process_principal(principal)
permission_set = _process_permission_set(ids, permission_set)
target = _process_target(target)
cache = {}
filter_cache = {}
context = _Context(
session = session,
ids=ids,
principal=principal,
principal_filter=principal_filter,
permission_set=permission_set,
permission_set_filter=permission_set_filter,
target=target,
target_filter=target_filter,
get_principal_names=get_principal_names,
get_permission_set_names=get_permission_set_names,
get_target_names=get_target_names,
ou_recursive=ou_recursive,
cache=cache,
filter_cache=filter_cache,
)
target_iterator = _get_target_iterator(context)
permission_set_iterator = _get_permission_set_iterator(context)
principal_iterator = _get_principal_iterator(context)
for target_type, target_id, target_name in target_iterator():
for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name):
for principal_type, principal_id, principal_name in principal_iterator(
target_type, target_id, target_name,
permission_set_arn, permission_set_id, permission_set_name):
assignment = Assignment(
ids.instance_arn,
principal_type,
principal_id,
principal_name,
permission_set_arn,
permission_set_name,
target_type,
target_id,
target_name,
)
LOGGER.debug(f"Visiting assignment: {assignment}")
yield assignment
if __name__ == "__main__":
import boto3
import sys
import json
logging.basicConfig(level=logging.INFO)
kwargs = {}
for v in sys.argv[1:]:
if hasattr(logging, v):
LOGGER.setLevel(getattr(logging, v))
else:
kwargs = json.loads(v)
def fil(*args):
print(args)
return True
kwargs["target_filter"] = fil
try:
session = boto3.Session()
print(",".join(Assignment._fields))
for value in list_assignments(session, **kwargs):
print(",".join(v or "" for v in value))
except KeyboardInterrupt:
pass
| 2.21875 | 2 |
solutions/pic_search/webserver/src/service/theardpool.py | naetimus/bootcamp | 1 | 4572 | <reponame>naetimus/bootcamp<filename>solutions/pic_search/webserver/src/service/theardpool.py
import threading
from concurrent.futures import ThreadPoolExecutor
from service.train import do_train
def thread_runner(thread_num, func, *args):
executor = ThreadPoolExecutor(thread_num)
# submit the supplied callable to the pool and return the Future so callers can wait on it
f = executor.submit(func, *args)
return f
| 2.625 | 3 |
buildutil/main.py | TediCreations/buildutils | 0 | 4573 | #!/usr/bin/env python3
import os
import argparse
import subprocess
if __name__ == '__main__':
from version import __version__
from configParser import ConfigParser
else:
from .version import __version__
from .configParser import ConfigParser
def command(cmd):
"""Run a shell command"""
subprocess.call(cmd, shell=True)
"""
cmd_split = cmd.split()
process = subprocess.Popen(cmd_split,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = process.communicate()
return stdout, stderr
"""
def main():
absFilePath = os.path.dirname(os.path.abspath(__file__))
cwdPath = os.path.abspath(os.getcwd())
parser = argparse.ArgumentParser(
prog="buildutil",
description="Assembly/C/C++ utility to build embedded systems",
epilog="Author: <NAME>",
fromfile_prefix_chars='@')
# parser.add_argument('-v', '--verbose',
# action='store_true',
# help='an optional argument')
"""
parser.add_argument('Path',
metavar='path',
type=str,
default=cwdPath,
help='the config filepath')
"""
parser.add_argument(
'-d', '--directory',
type=str,
default=cwdPath,
help='the config filepath')
parser.add_argument(
'-v', '--version',
action='store_true',
help='get the version of the build system')
# parser.add_argument(
# '-f',
# '--file',
# help='A readable file',
# metavar='FILE',
# type=argparse.FileType('r'),
# default=None)
cmd_parser = parser.add_subparsers(dest='cmd', description="")
parser_build = cmd_parser.add_parser(
'build',
help="build the project")
parser_get_version = cmd_parser.add_parser(
'get_version',
help="try to get the version from git")
# parser_get_version.add_argument(
# '-a', '--alpha',
# dest='alpha',
# help='try to get the version')
# Execute parse_args()
args = parser.parse_args()
subcommand = parser.parse_args().cmd
if args.version is True:
print(f"version: {__version__}")
exit(0)
# if subcommand is None or subcommand == "build":
if subcommand == "build":
makefilePath = os.path.join(absFilePath, "conf/make/Makefile")
command(f"make -f {makefilePath}")
elif subcommand == "get_version":
print("version")
else:
ConfigParser()
print("fuck")
return
# Working directory
wd = os.path.abspath(args.directory)
print(f"File: {absFilePath}")
print(F"CWD: {cwdPath}")
print(F"Working directory: {wd}")
print(F"makefile path: {makefilePath}")
print()
command(f"make -f {makefilePath}")
if __name__ == '__main__':
main()
| 2.640625 | 3 |
python/get_links.py | quiddity-wp/mediawiki-api-demos | 63 | 4574 | <filename>python/get_links.py
#This file is auto-generated. See modules.json and autogenerator.py for details
#!/usr/bin/python3
"""
get_links.py
MediaWiki API Demos
Demo of `Links` module: Get all links on the given page(s)
MIT License
"""
import requests
S = requests.Session()
URL = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"titles": "<NAME>",
"prop": "links"
}
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
PAGES = DATA["query"]["pages"]
for k, v in PAGES.items():
for l in v["links"]:
print(l["title"])
| 3.1875 | 3 |
gautools/submit_gaussian.py | thompcinnamon/QM-calc-scripts | 0 | 4575 | <reponame>thompcinnamon/QM-calc-scripts<filename>gautools/submit_gaussian.py<gh_stars>0
#! /usr/bin/env python3
########################################################################
# #
# This script was written by <NAME> in 2015. #
# <EMAIL> <EMAIL> #
# #
# Copyright 2015 <NAME> IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written for Python 3 so that it keeps working on the newest
# versions of Python.
from __future__ import print_function
import argparse # For parsing commandline arguments
import datetime
import glob # Allows referencing file system/file names
import os
import re
import readline # Allows easier file input (with tab completion?)
import subprocess # Allows for submitting commands to the shell
from warnings import warn
from thtools import cd, make_obj_dir, save_obj, resolve_path
yes = ['y', 'yes', '1']
# An input function that can prefill in the text entry
# Not sure if this works in 3.5+ because raw_input is gone
def rlinput(prompt, prefill=''):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt)
finally:
readline.set_startup_hook()
def _dir_and_file(path):
warn('_dir_and_file is deprecated. Use os.path.split instead',
DeprecationWarning)
if '/' in path:
rel_dir, f_name = path.rsplit('/', 1)
rel_dir = rel_dir + '/'
else:
rel_dir = ''
f_name = path
return rel_dir, f_name
def create_gau_input(coord_name, template, verbose=True):
"""
make gaussian input file by combining header and coordinates files
This function takes as input a file with a set of molecular
coordinates (the form should not matter, it will just be copied
into the next file) and a template file that should be the header
for the desired calculation (including charge and multiplicity),
returns the name of the file, and creates a Gaussian input file ending
with '.com'
:param str coord_name: name of file with coordinates in a format
Gaussian can read
:param str template: name of file with header for Gaussian calculation
(up to and including the charge and multiplicity)
:param bool verbose: If True, some status messages will be printed
(including file names)
:return: name of the written file
:rtype: str
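Example (illustrative; the file names are hypothetical)::
    # combines the header in header.txt with the coordinates in water.xyz,
    # writes water.com and returns that name
    com_name = create_gau_input('water.xyz', 'header.txt')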
"""
if verbose:
print('Creating Gaussian input file...')
_out_name = coord_name.rsplit('.', 1)[0] + '.com'
with open(_out_name, 'w') as out_file:
with open(template, 'r') as templ_file:
if verbose:
print('opened {}'.format(template))
for line in templ_file:
out_file.write(line)
if '\n' not in line:
out_file.write('\n')
with open(coord_name, 'r') as in_file:
if verbose:
print('opened {}'.format(coord_name))
for i, line in enumerate(in_file):
if i < 2:
# ignore first two lines
# number of atoms and the title/comment
continue
# if line.strip().isdigit():
# # the first line is the number of atoms
# continue
# # XYZ files created by mathematica have a comment
# # as the second line saying something like:
# # "Created by mathematica". Obv. want to ignore that
# if line.strip().startswith('Create') or
# line.strip().startswith('generated'):
# continue
# else:
out_file.write(line)
out_file.write('\n\n\n')
if verbose:
print('created Gaussian input file {}'.format(_out_name))
return _out_name
def get_input_files(base_name, batch):
_in_name_list = glob.glob(base_name + '*')
_in_name_list.sort() # sort files alphanumerically
_in_name_list.sort(key=len) # then sort by length so 1,...,9,10,11 order correctly
# (a plain alphanumeric sort puts 1,10,11,... before 2). If the numbers are
# zero-padded (01,02,...) the names are all the same length and this second
# sort is a no-op.
if not batch:
num_files = len(_in_name_list)
if num_files > 1:
print('Multiple files starting with {}'.format(base_name))
if input('Did you mean to execute a batch job? ') in yes:
batch = True
else:
print('What file name shall I use?')
_in_name_list = [rlinput('file name: ', base_name)]
return _in_name_list, batch
def use_template(template, in_names, verbose):
made_name_list = []
for in_name in in_names:
out_name = create_gau_input(in_name, template, verbose=verbose)
made_name_list.append(out_name)
if verbose:
print('Added {} to files to possibly submit.'.format(out_name))
_in_name_list = made_name_list
_in_name_list.sort()
_in_name_list.sort(key=len)
return _in_name_list
def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,
mem='125', executable='g09',
chk_file=None, copy_chk=False,
ln_running=None,
hold_jid=None, xyz=None, make_xyz=None, make_input=False,
ugt_dict=None):
"""
Write submission script for (Gaussian) jobs for submission to queue
If make_xyz is not None, the file make_xyz will be checked to exist
first to make sure to not waste time when missing a necessary input file.
:param str input_name: Name of the file to use as input
:param int num_cores: Number of cores to request
:param str time: Amount of time to request in the format 'hh:mm:ss'
:param bool verbose: If True, print out some status messages and such
:type mem: int or str
:param mem: Minimum amount of memory to request
:param str executable: Executable file to use for the job
Example, 'g09', 'g16'
:param str chk_file: If not None, this file will be copied back after the
job has completed. If this is not None and make_input is True,
this will also be passed to use_gen_template.
:param bool copy_chk: If this is True, the script will attempt to copy
what should be an existing checkpoint file to the scratch directory
before running the job. `chk_file` must be not None as well.
:param str ln_running: If not None, this will be the base name for
linking the output file to the current directory. If chk_file is not
None, it will also be linked with the same base name.
:param str hold_jid: Job on which this job should depend.
This should be the name of another job in the queuing system.
:param str xyz: Name of an xyz file to use as input to use_gen_template
(if make_input is True).
:param str make_xyz: The name of a file to pass to obabel to be used to
create an xyz file to pass to use_gen_template.
:param bool make_input: If True, use_gen_template will be used to create
input for the Gaussian calculation.
:param dict ugt_dict: dict of arguments to pass to use_gen_template.
This should not include out_file, xyz, nproc, mem, or checkpoint
because those will all be used from other arguments to this function.
out_file will be input_name; xyz will be xyz or a time-based name if
make_xyz is not None; nproc will be $NSLOTS (useful if this gets
changed after job submission); mem will be mem; and checkpoint will
be chk_file.
:return: The name of the script file
:rtype: str
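Example (a minimal sketch; the input file and job settings are hypothetical)::
    script = write_sub_script('benzene_opt.com', num_cores=8,
                              time='04:00:00', executable='g16',
                              chk_file='benzene_opt.chk')
    submit_scripts([script], submit=True)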
"""
rel_dir, file_name = os.path.split(input_name)
if file_name.endswith('.com'):
short_name = os.path.splitext(file_name)[0]
if not short_name + '.com' == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
elif '.' in file_name:
short_name, input_extension = os.path.splitext(file_name)
if not short_name + '.' + input_extension == file_name:
raise SyntaxError('problem interpreting file name. ' +
'Period in file name?')
out_name = short_name + '.out'
else:
short_name = file_name
file_name = short_name + '.com'
print('Assuming input file is {}'.format(file_name))
out_name = short_name + '.out'
job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)
if len(job_name) == 0:
job_name = 'default'
_script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh')
temp_xyz = os.path.abspath('.temp' +
datetime.datetime.now().strftime('%H%M%S%f') +
'.xyz')
if xyz is None or make_xyz is not None:
n_xyz = temp_xyz
else:
n_xyz = resolve_path(xyz)
temp_pkl = temp_xyz[:-4]
if ugt_dict is not None:
make_obj_dir()
pkl_path = save_obj(ugt_dict, temp_pkl)
if chk_file is not None:
chk_line = 'checkpoint=\'{}\','.format(chk_file)
else:
chk_line = ''
with open(_script_name, 'w') as script_file:
sfw = script_file.write
sfw('#!/bin/bash -l\n\n')
sfw('#$ -pe omp {}\n'.format(num_cores))
sfw('#$ -M <EMAIL>\n')
sfw('#$ -m eas\n')
sfw('#$ -l h_rt={}\n'.format(time))
sfw('#$ -l mem_total={}G\n'.format(mem))
sfw('#$ -N {}\n'.format(job_name))
sfw('#$ -j y\n')
sfw('#$ -o {}.log\n\n'.format(short_name))
if hold_jid is not None:
sfw('#$ -hold_jid {}\n\n'.format(hold_jid))
if make_xyz is not None:
sfw('if [ ! -f {} ]; then\n'.format(
os.path.abspath(make_xyz)) +
' exit 17\n'
'fi\n\n')
sfw('module load wxwidgets/3.0.2\n')
sfw('module load openbabel/2.4.1\n\n')
sfw('obabel {} -O {}\n\n'.format(os.path.abspath(
make_xyz), os.path.abspath(n_xyz)))
if make_input:
sfw('python -c "from gautools.tools import '
'use_gen_template as ugt;\n'
'from thtools import load_obj, get_node_mem;\n'
'm = get_node_mem();\n'
'd = load_obj(\'{}\');\n'.format(
os.path.abspath(pkl_path)) +
'ugt(\'{}\',\'{}\','.format(
file_name, os.path.abspath(n_xyz)) +
'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +
'**d)"\n\n')
sfw('INPUTFILE={}\n'.format(file_name))
sfw('OUTPUTFILE={}\n'.format(out_name))
if chk_file is not None:
sfw('CHECKFILE={}\n\n'.format(chk_file))
else:
sfw('\n')
if ln_running is not None:
sfw('WORKINGOUT={}.out\n'.format(ln_running))
if chk_file is not None:
sfw('WORKINGCHK={}.chk\n\n'.format(ln_running))
else:
sfw('\n')
sfw('CURRENTDIR=`pwd`\n')
sfw('SCRATCHDIR=/scratch/$USER\n')
sfw('mkdir -p $SCRATCHDIR\n\n')
sfw('cd $SCRATCHDIR\n\n')
sfw('cp $CURRENTDIR/$INPUTFILE .\n')
if chk_file is not None:
sfw('# ') if not copy_chk else None
sfw('cp $CURRENTDIR/$CHECKFILE .\n\n')
else:
sfw('\n')
if ln_running is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '
'$CURRENTDIR/$WORKINGOUT\n')
if chk_file is not None:
sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '
'$CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n')
sfw('echo About to run {} in /net/`'.format(executable) +
'hostname -s`$SCRATCHDIR\n\n')
sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))
sfw('\n\n')
if ln_running is not None:
sfw('rm $CURRENTDIR/$WORKINGOUT')
if chk_file is not None:
sfw(' $CURRENTDIR/$WORKINGCHK\n\n')
else:
sfw('\n\n')
sfw('cp $OUTPUTFILE $CURRENTDIR/.\n')
if chk_file is not None:
sfw('cp $CHECKFILE $CURRENTDIR/.\n\n')
else:
sfw('\n')
sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n')
sfw('echo output was copied to $CURRENTDIR\n\n')
if verbose:
print('script written to {}'.format(_script_name))
return _script_name
def submit_scripts(scripts, batch=False, submit=False, verbose=False):
outputs = []
if batch:
if submit or input('submit all jobs? ') in yes:
for script in scripts:
rd, f = _dir_and_file(script)
with cd(rd, ignore_blank=True):
cl = ['qsub', f]
# Don't really know how this works. Copied from
# http://stackoverflow.com/questions/4256107/
# running-bash-commands-in-python
process = subprocess.Popen(cl,
stdout=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()[0]
if verbose:
print(output)
outputs.append(output)
else:
if verbose:
print('No jobs submitted, but scripts created')
else:
if submit or input('submit job {}? '.format(scripts[0])) in yes:
rd, f = _dir_and_file(scripts[0])
with cd(rd, ignore_blank=True):
cl = ['qsub', f]
# Don't really know how this works. Copied from
# http://stackoverflow.com/questions/4256107/
# running-bash-commands-in-python
process = subprocess.Popen(cl,
stdout=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()[0]
if verbose:
print(output)
outputs.append(output)
else:
if verbose:
print('{} not submitted'.format(scripts))
_job_info = [' '.join(output.split(' ')[2:4]) for output in outputs]
return _job_info
if __name__ == '__main__':
description = 'Create and submit a script to run a Gaussian job on SCC'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('in_name',
help='Name of Gaussian input file')
parser.add_argument('-c', '--numcores', type=int, default=16,
help='Number of cores for job')
# I should probably check validity of this time request
# Maybe it doesn't matter so much because it just won't
# submit the job and it will give quick feedback about that?
parser.add_argument('-t', '--time',
help='Time required as "hh:mm:ss"',
default='12:00:00')
parser.add_argument('-e', '--executable', type=str, default='g09',
help='name of executable to run')
parser.add_argument('-b', '--batch', action='store_true',
help='create multiple scripts (batch job)')
parser.add_argument('-x', '--template', default=None,
help='template file for creating input from coords')
parser.add_argument('-s', '--submit', action='store_true',
help='Automatically submit jobs?')
parser.add_argument('-v', '--verbose', action='store_true',
help='make program more verbose')
parser.add_argument('-j', '--nojobinfo', action='store_false',
help='Do not return the submitted job information')
parser.add_argument('-k', '--chk_file', default=None,
help='checkpoint file to be written and copied back')
parser.add_argument('--copy_chk', action='store_true',
help='Copy check file to the scratch directory')
parser.add_argument('-l', '--ln_running', type=str, default=None,
help='base name for linking output to cwd while '
'running')
parser.add_argument('-d', '--hold_jid', default=None,
help='job on which this job should depend')
args = parser.parse_args()
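# Example invocation (illustrative; the file names are hypothetical):
#   ./submit_gaussian.py water_opt.com -c 8 -t 04:00:00 -e g16 -s
# which writes submitwater_opt.sh and submits it to the queue.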
in_name_list, args.batch = get_input_files(args.in_name, args.batch)
if args.template:
in_name_list = use_template(args.template, in_name_list, args.verbose)
script_list = []
for in_name in in_name_list:
script_name = write_sub_script(input_name=in_name,
num_cores=args.numcores,
time=args.time,
verbose=args.verbose,
executable=args.executable,
chk_file=args.chk_file,
copy_chk=args.copy_chk,
ln_running=args.ln_running,
hold_jid=args.hold_jid)
script_list.append(script_name)
if not len(script_list) == len(in_name_list):
# This should never be the case as far as I know, but I would
# like to make sure everything input gets a script and all the
# script names are there to be submitted.
raise IOError('num scripts dif. from num names given')
job_info = submit_scripts(script_list, args.batch, args.submit,
args.verbose)
if job_info and args.nojobinfo:
for job in job_info:
print(job)
if args.verbose:
print('Done. Completed normally.')
| 1.90625 | 2 |
experiments/recorder.py | WeiChengTseng/maddpg | 3 | 4576 | import json
import copy
import pdb
import numpy as np
import pickle
def listify_mat(matrix):
matrix = np.array(matrix).astype(str)
if len(matrix.shape) > 1:
matrix_list = []
for row in matrix:
try:
matrix_list.append(list(row))
except:
pdb.set_trace()
return matrix_list
else:
return list(matrix)
class Recorder():
def __init__(self):
self._traj, self._cur_traj = [], []
return
def pack_traj(self):
self._traj.append(copy.deepcopy(self._cur_traj))
self._cur_traj = []
return
def add(self, o, a, r, d):
# self._cur_traj.append((o, a, r, d))
self._cur_traj.append(
(listify_mat(o), listify_mat(a), listify_mat(r), d))
return
def export_pickle(self, filename='traj'):
if filename == '':
raise ValueError('incorrect file name')
traj = []
for t in self._traj:
obs = np.array([tt[0] for tt in t]).astype(np.float32)
act = np.array([tt[1] for tt in t]).astype(np.float32)
rwd = np.array([tt[2] for tt in t]).astype(np.float32)
done = np.array([tt[3] for tt in t])
# pdb.set_trace()
traj.append({
'observations': obs[:-1],
'next_observations': obs[1:],
'actions': act[:-1],
'rewards': rwd[:-1],
'terminals': done[:-1]
})
with open('{}.pkl'.format(filename), 'wb') as outfile:
pickle.dump(traj, outfile)
return
def export(self, filename='traj'):
if filename == '':
raise ValueError('incorrect file name')
traj = {'traj': []}
for t in self._traj:
traj['traj'].append(t)
# json.dumps(traj, sort_keys=True, indent=4)
# pdb.set_trace()
with open('{}.json'.format(filename), 'w') as outfile:
json.dump(traj, outfile)
return | 2.65625 | 3 |
generate/dummy_data/mvp/gen_csv.py | ifekxp/data | 0 | 4577 | <reponame>ifekxp/data
from faker import Faker
import csv
# Reference: https://pypi.org/project/Faker/
output = open('data.CSV', 'w', newline='')
fake = Faker()
header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat']
mywriter=csv.writer(output)
mywriter.writerow(header)
for r in range(1000):
mywriter.writerow([
fake.name(),
fake.random_int(min=18, max=80, step=1),
fake.street_address(),
fake.city(),
fake.state(),
fake.zipcode(),
fake.longitude(),
fake.latitude()
])
output.close() | 2.71875 | 3 |
subir/ingreso/migrations/0004_auto_20191003_1509.py | Brandon1625/subir | 0 | 4578 | # Generated by Django 2.2.4 on 2019-10-03 21:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ingreso', '0003_auto_20190907_2152'),
]
operations = [
migrations.AlterField(
model_name='detalle_ingreso',
name='id_prod',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto'),
),
]
| 1.320313 | 1 |
pyscf/nao/test/test_0017_tddft_iter_nao.py | mfkasim1/pyscf | 3 | 4579 | from __future__ import print_function, division
import os,unittest
from pyscf.nao import tddft_iter
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname)
try:
from pyscf.lib import misc
libnao_gpu = misc.load_library("libnao_gpu")
td_gpu = tddft_iter(label='water', cd=dname, GPU=True)
except:
td_gpu = None
class KnowValues(unittest.TestCase):
def test_tddft_iter(self):
""" This is iterative TDDFT with SIESTA starting point """
self.assertTrue(hasattr(td, 'xocc'))
self.assertTrue(hasattr(td, 'xvrt'))
self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons
self.assertEqual(td.xocc[0].shape[0], 4)
self.assertEqual(td.xvrt[0].shape[0], 19)
dn0 = td.apply_rf0(td.moms1[:,0])
def test_tddft_iter_gpu(self):
""" Test GPU version """
if td_gpu is not None:
self.assertTrue(hasattr(td_gpu, 'xocc'))
self.assertTrue(hasattr(td_gpu, 'xvrt'))
self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence + H2 -- 2 electrons
self.assertEqual(td_gpu.xocc[0].shape[0], 4)
self.assertEqual(td_gpu.xvrt[0].shape[0], 19)
dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0])
if __name__ == "__main__": unittest.main()
| 2.328125 | 2 |
setup.py | dimasciput/osm2geojson | 0 | 4580 | import io
from os import path
from setuptools import setup
dirname = path.abspath(path.dirname(__file__))
with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def parse_requirements(filename):
lines = (line.strip() for line in open(path.join(dirname, filename)))
return [line for line in lines if line and not line.startswith("#")]
setup(
name='osm2geojson',
version='0.1.27',
license='MIT',
description='Parse OSM and Overpass JSON',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='geometry gis osm parsing',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/aspectumapp/osm2geojson',
packages=['osm2geojson'],
include_package_data=True,
install_requires=parse_requirements("requirements.txt")
)
| 2.078125 | 2 |
Cap_11/ex11.6.py | gguilherme42/Livro-de-Python | 4 | 4581 | import sqlite3
from contextlib import closing
nome = input('Nome do produto: ').lower().capitalize()
with sqlite3.connect('precos.db') as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))
registro = cursor.fetchone()
if registro is not None:
print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')
valor = float(input('Novo valor: R$'))
cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))
if cursor.rowcount == 1:
conexao.commit()
print('Alteração gravada.')
else:
conexao.rollback()
print('Alteração abortada.')
else:
print(f'Produto {nome} não encontrado.') | 3.59375 | 4 |
jet20/backend/solver.py | JTJL/jet20 | 1 | 4582 | <filename>jet20/backend/solver.py<gh_stars>1-10
import torch
import time
import copy
from jet20.backend.constraints import *
from jet20.backend.obj import *
from jet20.backend.config import *
from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED
import logging
logger = logging.getLogger(__name__)
class Solution(object):
def __init__(self,x,_vars,obj_value,status,duals):
self.status = status
self.obj_value = obj_value
self.vars = _vars
self.x = x
self.duals = duals # keep the dual variables supplied by the caller
def __str__(self):
return "obj_value: %s vars:%s" % (self.obj_value,self.vars)
__repr__ = __str__
class Problem(object):
def __init__(self,_vars,obj,le_cons=None,eq_cons=None):
self.obj = obj
self.le = le_cons
self.eq = eq_cons
self.vars = _vars
self.n = len(_vars)
@classmethod
def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device("cpu"),dtype=torch.float64):
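"""Build a Problem from array-like inputs (a minimal sketch of the expected shapes).
obj is a (Q, b, c) triple describing a quadratic objective (Q may be None for a
linear objective), le is (A, b) for the inequalities A x <= b, and eq is (A, b)
for the equalities A x == b. Plain lists also work because convert() wraps them
in torch.tensor. The variable names and numbers below are illustrative only::
    p = Problem.from_numpy(["x1", "x2"],
                           obj=(None, [1.0, 2.0], 0.0),
                           le=([[1.0, 1.0]], [4.0]))
"""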
def convert(x):
if x is not None:
if isinstance(x,torch.Tensor):
return x.type(dtype).to(device)
else:
return torch.tensor(x,dtype=dtype,device=device)
else:
return None
if obj is not None:
obj_Q,obj_b,obj_c = [convert(x) for x in obj]
if obj_Q is not None:
obj = QuadraticObjective(obj_Q,obj_b,obj_c)
elif obj_b is not None:
obj = LinearObjective(obj_b,obj_c)
if le is not None:
le_A,le_b = [convert(x) for x in le]
if le_b.ndim == 2 and le_b.size(0) == 1:
le_b = le_b.squeeze(0)
le = LinearLeConstraints(le_A,le_b)
if eq is not None:
eq_A,eq_b = [convert(x) for x in eq]
if eq_b.ndim == 2 and eq_b.size(0) == 1:
eq_b = eq_b.squeeze(0)
eq = LinearEqConstraints(eq_A,eq_b)
return cls(_vars,obj,le,eq)
def float(self):
if self.le is not None:
le = self.le.float()
else:
le = None
if self.eq is not None:
eq = self.eq.float()
else:
eq = None
obj = self.obj.float()
return self.__class__(self.vars,obj,le,eq)
def double(self):
if self.le is not None:
le = self.le.double()
else:
le = None
if self.eq is not None:
eq = self.eq.double()
else:
eq = None
obj = self.obj.double()
return self.__class__(self.vars,obj,le,eq)
def to(self,device):
if self.le is not None:
le = self.le.to(device) # keep the moved constraints so they can be returned below
else:
le = None
if self.eq is not None:
eq = self.eq.to(device)
else:
eq = None
obj = self.obj.to(device)
return self.__class__(self.vars,obj,le,eq)
def build_solution(self,x,obj_value,status,duals):
_vars = { var: v.item() for var,v in zip(self.vars,x)}
return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals)
class Solver(object):
def __init__(self):
self.pres = []
self.posts = []
def solve(self,p,config,x=None):
for pre in self.pres:
start = time.time()
p,x = pre.preprocess(p,x,config)
logger.debug("preprocessing name:%s, time used:%s",pre.name(),time.time()-start)
if x is None:
x = torch.zeros(p.n).float().to(config.device)
start = time.time()
p_f32 = p.float()
x = x.float()
x,_,status,duals = solve(p_f32,x,config,fast=True)
logger.debug("fast mode, time used:%s",time.time()-start)
x = x.double()
if isinstance(duals,(tuple,list)):
duals = [d.double() for d in duals]
else:
duals = duals.double()
if status == SUB_OPTIMAL:
start = time.time()
# p = p.double()
x,_,status,duals = solve(p,x,config,fast=True,duals=duals)
logger.debug("fast-precision mode, time used:%s",time.time()-start)
if status == SUB_OPTIMAL:
start = time.time()
x,_,status,duals = solve(p,x,config,fast=False,duals=duals)
logger.debug("precision mode, time used:%s",time.time()-start)
if status != OPTIMAL:
logger.warning("optimal not found, status:%s",status)
for post in self.posts:
start = time.time()
p,x = post.postprocess(p,x,config)
logger.debug("postprocessing name:%s, time used:%s",post.name(),time.time()-start)
return p.build_solution(x,p.obj(x),status,duals)
def register_pres(self,*pres):
self.pres.extend(pres)
def register_posts(self,*posts):
self.posts.extend(posts)
| 2.15625 | 2 |
tests/test_transforms.py | mengfu188/mmdetection.bak | 2 | 4583 | import torch
from mmdet.datasets.pipelines.transforms import Pad
from mmdet.datasets.pipelines.transforms import FilterBox
import numpy as np
import cv2
def test_pad():
raw = dict(
img=np.zeros((200, 401, 3), dtype=np.uint8)
)
cv2.imshow('raw', raw['img'])
pad = Pad(square=True, pad_val=255)
r = pad(raw)
print(r['img'].shape)
cv2.imshow('draw', r['img'])
cv2.waitKey()
raw = dict(
img=np.zeros((402, 401, 3), dtype=np.uint8)
)
cv2.imshow('raw', raw['img'])
pad = Pad(square=True, pad_val=255)
r = pad(raw)
print(r['img'].shape)
cv2.imshow('draw', r['img'])
cv2.waitKey()
def test_filter_box():
bboxes = np.array([[0, 0, 10, 10],
[10, 10, 20, 20],
[10, 10, 19, 20],
[10, 10, 20, 19],
[10, 10, 19, 19]])
gt_bboxes = np.array([[0, 0, 10, 9]])
result = dict(gt_bboxes=bboxes)
fb = FilterBox((10, 10))
fb(result)
if __name__ == '__main__':
# test_pad()
test_filter_box()
| 2.515625 | 3 |
dev/Tools/build/waf-1.7.13/lmbrwaflib/unit_test_lumberyard_modules.py | akulamartin/lumberyard | 8 | 4584 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib import Errors
import lumberyard_modules
import unittest
import pytest
import utils
class FakeContext(object):
pass
class FakeIncludeSettings(object):
pass
class FakePlatformSettings(object):
def __init__(self, platform_name, aliases=set()):
self.platform = platform_name
self.aliases = aliases
class FakeConfigurationSettings(object):
def __init__(self, settings_name, base_config=None):
self.base_config = base_config
self.name = settings_name
class FakeConfiguration(object):
def __init__(self, settings, is_test=False, is_server=False):
self.settings = settings
self.is_test = is_test
self.is_server = is_server
@pytest.fixture()
def mock_parse_json(mock_json_map):
if not mock_json_map:
mock_json_map = {'path': {}}
def _mock_parse_json(path, _):
return mock_json_map[path]
old_parse_json_file = utils.parse_json_file
utils.parse_json_file = _mock_parse_json
yield
utils.parse_json_file = old_parse_json_file
@pytest.fixture()
def fake_context():
return FakeContext()
def test_SanitizeKWInput_SimpleKwDictionary_Success():
kw = dict(
libpath='mylib'
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success():
kw = dict(
libpath='mylib',
additional_settings=dict(stlibpath='mystlib')
)
lumberyard_modules.sanitize_kw_input(kw)
assert isinstance(kw['libpath'], list)
assert kw['libpath'][0] == 'mylib'
assert isinstance(kw['additional_settings'], list)
assert isinstance(kw['additional_settings'][0], dict)
assert isinstance(kw['additional_settings'][0]['stlibpath'], list)
assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib'
@pytest.mark.parametrize(
"target, kw_key, source_section, additional_aliases, merge_dict, expected", [
pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'),
pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'),
pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'),
pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'),
pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'),
pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'),
pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'),
pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'),
])
def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected):
fake_context = FakeContext()
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
if isinstance(expected,dict):
test_settings.merge_kw_key(target=target,
kw_key=kw_key,
source_section=source_section,
merge_kw=merge_dict)
assert merge_dict == expected
elif isinstance(expected, type(Errors.WafError)):
with pytest.raises(Errors.WafError):
test_settings.merge_kw_key(target=target,
kw_key=kw_key,
source_section=source_section,
merge_kw=merge_dict)
@pytest.mark.parametrize(
"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected", [
pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'),
pytest.param({}, 'include_test',
{
'path': {
'includes': ['include_test']
},'include_test': {}
}, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes')
])
def test_ProjectSettingsFileAdditionalIncludes_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected):
if fake_include_settings:
def _mock_get_project_settings_file(include_settings_file, additional_aliases):
assert fake_include_settings == include_settings_file
fake_settings = FakeIncludeSettings()
return fake_settings
fake_context.get_project_settings_file = _mock_get_project_settings_file
test = lumberyard_modules.ProjectSettingsFile(fake_context,
'path',
additional_aliases)
assert test.dict == expected
@pytest.mark.parametrize(
"mock_json_map, additional_aliases, section_key, expected", [
pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'),
pytest.param({
'path': {
"test_section": {
"key1": "value1"
}
}
}, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges')
])
def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected):
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
merge_dict = {}
test_settings.merge_kw_section(section_key=section_key,
target='test_target',
merge_kw=merge_dict)
assert expected == merge_dict
class ProjectSettingsTest(unittest.TestCase):
def setUp(self):
self.old_parse_json = utils.parse_json_file
utils.parse_json_file = self.mockParseJson
self.mock_json_map = {}
def tearDown(self):
utils.parse_json_file = self.old_parse_json
def mockParseJson(self, path, _):
return self.mock_json_map[path]
def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}):
self.mock_json_map = {'path': test_dict}
test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)
return test_settings
def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self):
"""
Test scenario:
Setup a project settings that contains other project settings, so that it can recursively call merge_kw_dict
recursively
"""
include_settings_file = 'include_test'
test_settings_single_include = {'includes': [include_settings_file]}
test_empty_settings = {}
test_merge_kw_key = 'passed'
test_merge_kw_value = True
self.mock_json_map = {'path': test_settings_single_include,
include_settings_file: test_empty_settings}
# Prepare a mock include settings object
test_include_settings = self.createSimpleSettings()
def _mock_merge_kw_dict(target, merge_kw, platform, configuration):
merge_kw[test_merge_kw_key] = test_merge_kw_value
pass
test_include_settings.merge_kw_dict = _mock_merge_kw_dict
# Prepare a mock context
fake_context = FakeContext()
def _mock_get_project_settings_file(_a, _b):
return test_include_settings
fake_context.get_project_settings_file = _mock_get_project_settings_file
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_settings_single_include)
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=None,
configuration=None)
self.assertIn(test_merge_kw_key, test_merge_kw)
self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value)
def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when only platform is set and not any configurations
"""
test_platform = 'test_platform'
test_alias = 'alias_1'
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform',
aliases={test_alias})
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform,
configuration=None)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform), sections_merged)
self.assertIn('{}/*'.format(test_alias), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 2)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is not a test nor
server configuration, but is derived from another configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
base_test_configuration_name = 'base_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name,
base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name))))
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 3)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test and a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 8)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a test but not a
server configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=True,
is_server=False)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test', sections_merged)
self.assertIn('{}/*/test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self):
"""
Test scenario:
Test the merge_kw_dict when the platform + configuration is set, and the configuration is a server but not a
test configuration
"""
test_platform_name = 'test_platform'
test_configuration_name = 'test_configuration'
test_configuration = FakeConfiguration(
settings=FakeConfigurationSettings(settings_name=test_configuration_name),
is_test=False,
is_server=True)
fake_context = FakeContext()
fake_platform_settings = FakePlatformSettings(platform_name='test_platform')
def _mock_get_platform_settings(platform):
self.assertEqual(platform, test_platform_name)
return fake_platform_settings
fake_context.get_platform_settings = _mock_get_platform_settings
test_dict = {}
test_settings = self.createSimpleSettings(fake_context=fake_context,
test_dict=test_dict)
sections_merged = set()
def _mock_merge_kw_section(section, target, merge_kw):
sections_merged.add(section)
pass
test_settings.merge_kw_section = _mock_merge_kw_section
test_merge_kw = {}
test_settings.merge_kw_dict(target='test_target',
merge_kw=test_merge_kw,
platform=test_platform_name,
configuration=test_configuration)
# Validate all the sections passed to the merge_kw_dict
self.assertIn('{}/*'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated', sections_merged)
self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/dedicated,test', sections_merged)
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertIn('*/*/test,dedicated', sections_merged)
self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)
self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)
self.assertEqual(len(sections_merged), 11)
| 1.984375 | 2 |
linprog_curvefit.py | drofp/linprog_curvefit | 0 | 4585 | #!/usr/bin/env python3
"""Curve fitting with linear programming.
Minimizes the sum of error for each fit point to find the optimal coefficients
for a given polynomial.
Overview:
Objective: Sum of errors
Subject to: Bounds on coefficients
Credit: "Curve Fitting with Linear Programming", <NAME> and <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import string
from ortools.linear_solver import pywraplp
class ErrorDefinition(enum.Enum):
SUM_ABS_DEV = enum.auto()
SUM_MAX_DEVIATION = enum.auto()
def _generate_variables(solver, points, coeff_ranges, err_max, error_def):
"""Create coefficient variables.
    Initial version works for polynomials with up to 26 coefficients. One letter
    of the English alphabet is used per coefficient name.
TODO(drofp): Figure out naming scheme for arbitrary number of variables.
"""
num_of_coeff = len(coeff_ranges)
variables = []
coeff_names = []
# Add coefficients to variable list.
if num_of_coeff == 2:
coeff_names.append('m')
coeff_names.append('b')
else:
for letter_cnt in range(num_of_coeff):
coeff_names.append(string.ascii_lowercase[letter_cnt])
for coeff_num in range(num_of_coeff):
if coeff_ranges[coeff_num][0] is None:
lower_bound = -solver.Infinity()
else:
lower_bound = coeff_ranges[coeff_num][0]
if coeff_ranges[coeff_num][1] is None:
upper_bound = solver.Infinity()
else:
upper_bound = coeff_ranges[coeff_num][1]
variables.append(
solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num]))
# Add absolute error variables to variable list
for point_cnt in range(len(points)):
positive_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_plus')
negative_err_var = solver.NumVar(
0, err_max, 'e' + str(point_cnt + 1) + '_minus')
variables.append(positive_err_var)
variables.append(negative_err_var)
return variables
def _generate_objective_fn(
solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV):
"""Generate objective function for given error definition."""
objective = solver.Objective()
for variable in variables[num_of_coeff:]:
objective.SetCoefficient(variable, 1)
return objective
def _generate_constraints(solver, points, num_of_coeff, variables):
constraints = []
for point_num, point in enumerate(points):
# Equivalency constraint
constraint = solver.Constraint(point[1], point[1])
# Resultant Coefficient terms
for coeff_num, coeff in enumerate(variables[:num_of_coeff]):
power = num_of_coeff - coeff_num - 1
x_val = point[0] ** power
constraint.SetCoefficient(coeff, x_val)
# Error terms
ex_plus = variables[num_of_coeff + 2 * point_num]
ex_minus = variables[num_of_coeff + 2 * point_num + 1]
constraint.SetCoefficient(ex_plus, -1)
constraint.SetCoefficient(ex_minus, 1)
constraints.append(constraint)
return constraints
def get_optimal_polynomial(
points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV,
err_max=10000, solver=None):
"""Optimize coefficients for any order polynomial.
Args:
points: A tuple of points, represented as tuples (x, y)
        coeff_ranges: A tuple of valid coefficient ranges, represented as tuples
            (min, max). The number of elements in the tuple determines the order of the
            polynomial, from highest order (0th index) to lowest order (nth index).
        error_def: An ErrorDefinition enum, specifying the definition of error.
        err_max: An Integer, specifying the maximum allowable error.
        solver: An ortools.pywraplp.Solver object, if a specific solver instance is
            requested by the caller.
    Returns:
        A Dictionary, the desired coefficients mapped to their values.
"""
if coeff_ranges is None:
raise ValueError('Please provide appropriate coefficient range.')
if solver is None:
solver = pywraplp.Solver(
'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
variables = _generate_variables(
solver, points, coeff_ranges, err_max=err_max,
error_def=error_def)
num_of_coeff = len(coeff_ranges)
_generate_objective_fn(solver, num_of_coeff, variables)
_generate_constraints(solver, points, num_of_coeff, variables)
solver.Solve()
var_to_val = dict()
for coeff in variables[:num_of_coeff]:
var_to_val[coeff.name()] = coeff.solution_value()
return var_to_val
def demo_optimal_linear_5points():
"""Demonstration of getting optimal linear polynomial.
Uses 5 points from Swanson's curve fitting paper.
"""
print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER')
points = (0,1), (1,3), (2,2), (3,4), (4,5)
coeff_ranges = ((None, None), (None, None))
# solver = pywraplp.Solver(
# 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
optimized_coefficients = get_optimal_polynomial(
points=points, coeff_ranges=coeff_ranges)
for elm in optimized_coefficients:
print('elm: {}'.format(elm))
print(
'type(optimized_coefficients): {}'.format(
type(optimized_coefficients)))
print('optimized_coefficients: {}'.format(optimized_coefficients))
# m, b = optimized_coefficients
# print('Optimized m: {}, b: {}'.format(m, b))
def demo_optimal_linear_10points():
print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_quadratic_10points():
print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_quadratic_19points():
print('STARTING QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
x_vals.extend([5.0, 5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0])
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3])
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def demo_optimal_cubic_10points():
print('STARTING CUBIC DEMO WITH 10 POINTS FROM WILLIAMS')
x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
points = tuple(zip(x_vals, y_vals))
coeff_ranges = ((None, None), (None, None), (None, None), (None, None))
print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))
def main():
demo_optimal_quadratic_19points()
if __name__ == '__main__':
main() | 3.65625 | 4 |
build-script-helper.py | aciidb0mb3r/swift-stress-tester | 0 | 4586 | <filename>build-script-helper.py
#!/usr/bin/env python
"""
This source file is part of the Swift.org open source project
Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
Licensed under Apache License v2.0 with Runtime Library Exception
See https://swift.org/LICENSE.txt for license information
See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
------------------------------------------------------------------------------
This is a helper script for the main swift repository's build-script.py that
knows how to build and install the stress tester utilities given a swift
workspace.
"""
from __future__ import print_function
import argparse
import sys
import os, platform
import subprocess
def printerr(message):
print(message, file=sys.stderr)
def main(argv_prefix = []):
args = parse_args(argv_prefix + sys.argv[1:])
run(args)
def parse_args(args):
parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY')
parser.add_argument('--package-dir', default='SourceKitStressTester')
parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands')
parser.add_argument('--prefix', help='install path')
parser.add_argument('--config', default='debug')
parser.add_argument('--build-dir', default='.build')
parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create a unified build of SwiftSyntax with other projects.')
parser.add_argument('--toolchain', required=True, help='the toolchain to use when building this package')
parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies')
parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building')
parser.add_argument('build_actions', help="Extra actions to perform. Can be any number of the following", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs="*", default=['build'])
parsed = parser.parse_args(args)
if ("install" in parsed.build_actions or "all" in parsed.build_actions) and not parsed.prefix:
        parser.error("'--prefix' is required with the install action")
parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift')
parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib')
# Convert package_dir to absolute path, relative to root of repo.
repo_path = os.path.dirname(__file__)
parsed.package_dir = os.path.realpath(
os.path.join(repo_path, parsed.package_dir))
# Convert build_dir to absolute path, relative to package_dir.
parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir)
return parsed
def run(args):
sourcekit_searchpath=args.sourcekitd_dir
package_name = os.path.basename(args.package_dir)
env = dict(os.environ)
# Use local dependencies (i.e. checked out next sourcekit-lsp).
if not args.no_local_deps:
env['SWIFTCI_USE_LOCAL_DEPS'] = "1"
if args.update:
print("** Updating dependencies of %s **" % package_name)
try:
update_swiftpm_dependencies(package_dir=args.package_dir,
swift_exec=args.swift_exec,
build_dir=args.build_dir,
env=env,
verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr('FAIL: Updating dependencies of %s failed' % package_name)
printerr('Executing: %s' % ' '.join(e.cmd))
sys.exit(1)
# The test action creates its own build. No need to build if we are just testing
if should_run_any_action(['build', 'install'], args.build_actions):
print("** Building %s **" % package_name)
try:
invoke_swift(package_dir=args.package_dir,
swift_exec=args.swift_exec,
action='build',
products=get_products(args.package_dir),
sourcekit_searchpath=sourcekit_searchpath,
build_dir=args.build_dir,
multiroot_data_file=args.multiroot_data_file,
config=args.config,
env=env,
verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr('FAIL: Building %s failed' % package_name)
printerr('Executing: %s' % ' '.join(e.cmd))
sys.exit(1)
output_dir = os.path.realpath(os.path.join(args.build_dir, args.config))
if should_run_action("generate-xcodeproj", args.build_actions):
print("** Generating Xcode project for %s **" % package_name)
try:
generate_xcodeproj(args.package_dir,
swift_exec=args.swift_exec,
sourcekit_searchpath=sourcekit_searchpath,
env=env,
verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr('FAIL: Generating the Xcode project failed')
printerr('Executing: %s' % ' '.join(e.cmd))
sys.exit(1)
if should_run_action("test", args.build_actions):
print("** Testing %s **" % package_name)
try:
invoke_swift(package_dir=args.package_dir,
swift_exec=args.swift_exec,
action='test',
products=['%sPackageTests' % package_name],
sourcekit_searchpath=sourcekit_searchpath,
build_dir=args.build_dir,
multiroot_data_file=args.multiroot_data_file,
config=args.config,
env=env,
verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr('FAIL: Testing %s failed' % package_name)
printerr('Executing: %s' % ' '.join(e.cmd))
sys.exit(1)
if should_run_action("install", args.build_actions):
print("** Installing %s **" % package_name)
stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx')
try:
install_package(args.package_dir,
install_dir=args.prefix,
sourcekit_searchpath=sourcekit_searchpath,
build_dir=output_dir,
rpaths_to_delete=[stdlib_dir],
verbose=args.verbose)
except subprocess.CalledProcessError as e:
printerr('FAIL: Installing %s failed' % package_name)
printerr('Executing: %s' % ' '.join(e.cmd))
sys.exit(1)
# Returns true if any of the actions in `action_names` should be run.
def should_run_any_action(action_names, selected_actions):
for action_name in action_names:
if should_run_action(action_name, selected_actions):
return True
return False
def should_run_action(action_name, selected_actions):
if action_name in selected_actions:
return True
elif "all" in selected_actions:
return True
else:
return False
def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose):
args = [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update']
check_call(args, env=env, verbose=verbose)
def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose):
# Until rdar://53881101 is implemented, we cannot request a build of multiple
# targets simultaneously. For now, just build one product after the other.
for product in products:
invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose)
def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose):
args = [swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path', build_dir]
if multiroot_data_file:
args.extend(['--multiroot-data-file', multiroot_data_file])
if action == 'test':
args.extend(['--test-product', product])
else:
args.extend(['--product', product])
# Tell SwiftSyntax that we are building in a build-script environment so that
# it does not need to rebuilt if it has already been built before.
env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1'
env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath
check_call(args, env=env, verbose=verbose)
def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose):
bin_dir = os.path.join(install_dir, 'bin')
lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx')
for directory in [bin_dir, lib_dir]:
if not os.path.exists(directory):
os.makedirs(directory)
# Install sk-stress-test and sk-swiftc-wrapper
for product in get_products(package_dir):
src = os.path.join(build_dir, product)
dest = os.path.join(bin_dir, product)
# Create a copy of the list since we modify it
rpaths_to_delete_for_this_product = list(rpaths_to_delete)
        # Add the rpath to the stdlib in the toolchain
rpaths_to_add = ['@executable_path/../lib/swift/macosx']
if product in ['sk-stress-test', 'swift-evolve']:
# Make the rpath to sourcekitd relative in the toolchain
rpaths_to_delete_for_this_product += [sourcekit_searchpath]
rpaths_to_add += ['@executable_path/../lib']
install(src, dest,
rpaths_to_delete=rpaths_to_delete_for_this_product,
rpaths_to_add=rpaths_to_add,
verbose=verbose)
def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose):
copy_cmd=['rsync', '-a', src, dest]
print('installing %s to %s' % (os.path.basename(src), dest))
check_call(copy_cmd, verbose=verbose)
for rpath in rpaths_to_delete:
remove_rpath(dest, rpath, verbose=verbose)
for rpath in rpaths_to_add:
add_rpath(dest, rpath, verbose=verbose)
def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose):
package_name = os.path.basename(package_dir)
config_path = os.path.join(package_dir, 'Config.xcconfig')
with open(config_path, 'w') as config_file:
config_file.write('''
SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited)
LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited)
'''.format(sourcekit_searchpath=sourcekit_searchpath))
xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name)
args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path]
check_call(args, env=env, verbose=verbose)
def add_rpath(binary, rpath, verbose):
cmd = ['install_name_tool', '-add_rpath', rpath, binary]
check_call(cmd, verbose=verbose)
def remove_rpath(binary, rpath, verbose):
cmd = ['install_name_tool', '-delete_rpath', rpath, binary]
check_call(cmd, verbose=verbose)
def check_call(cmd, verbose, env=os.environ, **kwargs):
if verbose:
print(' '.join([escape_cmd_arg(arg) for arg in cmd]))
return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs)
def interleave(value, list):
return [item for pair in zip([value] * len(list), list) for item in pair]
def escape_cmd_arg(arg):
if '"' in arg or ' ' in arg:
return '"%s"' % arg.replace('"', '\\"')
else:
return arg
def get_products(package_dir):
# FIXME: We ought to be able to query SwiftPM for this info.
if package_dir.endswith("/SourceKitStressTester"):
return ['sk-stress-test', 'sk-swiftc-wrapper']
elif package_dir.endswith("/SwiftEvolve"):
return ['swift-evolve']
else:
return []
if __name__ == '__main__':
main()
| 1.953125 | 2 |
tests/components/deconz/test_scene.py | pcaston/core | 1 | 4587 | <filename>tests/components/deconz/test_scene.py
"""deCONZ scene platform tests."""
from unittest.mock import patch
from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON
from openpeerpower.const import ATTR_ENTITY_ID
from .test_gateway import (
DECONZ_WEB_REQUEST,
mock_deconz_put_request,
setup_deconz_integration,
)
async def test_no_scenes(opp, aioclient_mock):
"""Test that scenes can be loaded without scenes being available."""
await setup_deconz_integration(opp, aioclient_mock)
assert len(opp.states.async_all()) == 0
async def test_scenes(opp, aioclient_mock):
"""Test that scenes works."""
data = {
"groups": {
"1": {
"id": "Light group id",
"name": "Light group",
"type": "LightGroup",
"state": {"all_on": False, "any_on": True},
"action": {},
"scenes": [{"id": "1", "name": "Scene"}],
"lights": [],
}
}
}
with patch.dict(DECONZ_WEB_REQUEST, data):
config_entry = await setup_deconz_integration(opp, aioclient_mock)
assert len(opp.states.async_all()) == 1
assert opp.states.get("scene.light_group_scene")
# Verify service calls
mock_deconz_put_request(
aioclient_mock, config_entry.data, "/groups/1/scenes/1/recall"
)
# Service turn on scene
await opp.services.async_call(
SCENE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "scene.light_group_scene"},
blocking=True,
)
assert aioclient_mock.mock_calls[1][2] == {}
await opp.config_entries.async_unload(config_entry.entry_id)
assert len(opp.states.async_all()) == 0
| 2.125 | 2 |
tensorhive/config.py | roscisz/TensorHive | 129 | 4588 | from pathlib import PosixPath
import configparser
from typing import Dict, Optional, Any, List
from inspect import cleandoc
import shutil
import tensorhive
import os
import logging
log = logging.getLogger(__name__)
class CONFIG_FILES:
# Where to copy files
# (TensorHive tries to load these by default)
config_dir = PosixPath.home() / '.config/TensorHive'
MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini')
HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini')
MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini')
# Where to get file templates from
# (Clone file when it's not found in config directory)
tensorhive_package_dir = PosixPath(__file__).parent
MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini')
HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini')
MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini')
ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini')
MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations')
class ConfigInitilizer:
'''Makes sure that all default config files exist'''
def __init__(self):
# 1. Check if all config files exist
all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \
PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()
if not all_exist:
log.warning('[•] Detected missing default config file(s), recreating...')
self.recreate_default_configuration_files()
log.info('[•] All configs already exist, skipping...')
def recreate_default_configuration_files(self) -> None:
try:
            # 1. Create directory for storing config files
CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True)
# 2. Clone templates safely from `tensorhive` package
self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH)
self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)
# 3. Change config files permission
rw_owner_only = 0o600
os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only)
os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)
except Exception:
log.error('[✘] Unable to recreate configuration files.')
def safe_copy(self, src: str, dst: str) -> None:
'''Safe means that it won't override existing configuration'''
if PosixPath(dst).exists():
log.info('Skipping, file already exists: {}'.format(dst))
else:
shutil.copy(src, dst)
log.info('Copied {} to {}'.format(src, dst))
class ConfigLoader:
@staticmethod
def load(path, displayed_title=''):
import configparser
config = configparser.ConfigParser(strict=False)
full_path = PosixPath(path).expanduser()
if config.read(str(full_path)):
log.info('[•] Reading {} config from {}'.format(displayed_title, full_path))
else:
log.warning('[✘] Configuration file not found ({})'.format(full_path))
log.info('Using default {} settings from config.py'.format(displayed_title))
return config
ConfigInitilizer()
config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')
def display_config(cls):
'''
    Displays all uppercase class attributes (class must be defined first)
Example usage: display_config(API_SERVER)
'''
print('[{class_name}]'.format(class_name=cls.__name__))
for key, value in cls.__dict__.items():
if key.isupper():
print('{} = {}'.format(key, value))
def check_env_var(name: str):
'''Makes sure that env variable is declared'''
if not os.getenv(name):
msg = cleandoc(
'''
{env} - undeclared environment variable!
Try this: `export {env}="..."`
''').format(env=name).split('\n')
log.warning(msg[0])
log.warning(msg[1])
class SSH:
section = 'ssh'
HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)
TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True)
TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0)
NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1)
KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key')
def hosts_config_to_dict(path: str) -> Dict: # type: ignore
'''Parses sections containing hostnames'''
hosts_config = ConfigLoader.load(path, displayed_title='hosts')
result = {}
for section in hosts_config.sections():
# We want to parse only sections which describe target hosts
if section == 'proxy_tunneling':
continue
hostname = section
result[hostname] = {
'user': hosts_config.get(hostname, 'user'),
'port': hosts_config.getint(hostname, 'port', fallback=22)
}
return result
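    # Illustrative example (hostname and user below are placeholders): a hosts
    # config section such as
    #
    #   [gpu-node-1]
    #   user = someuser
    #   port = 22
    #
    # is parsed by hosts_config_to_dict into
    #   {'gpu-node-1': {'user': 'someuser', 'port': 22}}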
def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore
'''Parses [proxy_tunneling] section'''
config = ConfigLoader.load(path, displayed_title='proxy')
section = 'proxy_tunneling'
# Check if section is present and if yes, check if tunneling is enabled
if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False):
return {
'proxy_host': config.get(section, 'proxy_host'),
'proxy_user': config.get(section, 'proxy_user'),
'proxy_port': config.getint(section, 'proxy_port', fallback=22)
}
else:
return None
AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE)
PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE)
class DB:
section = 'database'
default_path = '~/.config/TensorHive/database.sqlite'
def uri_for_path(path: str) -> str: # type: ignore
return 'sqlite:///{}'.format(PosixPath(path).expanduser())
SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path))
TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite)
class API:
section = 'api'
TITLE = config.get(section, 'title', fallback='TensorHive API')
URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0')
URL_PREFIX = config.get(section, 'url_prefix', fallback='api')
SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml')
IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers')
import yaml
respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml')
with open(respones_file_path, 'r') as file:
RESPONSES = yaml.safe_load(file)
class APP_SERVER:
section = 'web_app.server'
BACKEND = config.get(section, 'backend', fallback='gunicorn')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=5000)
WORKERS = config.getint(section, 'workers', fallback=4)
LOG_LEVEL = config.get(section, 'loglevel', fallback='warning')
class API_SERVER:
section = 'api.server'
BACKEND = config.get(section, 'backend', fallback='gevent')
HOST = config.get(section, 'host', fallback='0.0.0.0')
PORT = config.getint(section, 'port', fallback=1111)
DEBUG = config.getboolean(section, 'debug', fallback=False)
class MONITORING_SERVICE:
section = 'monitoring_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
class PROTECTION_SERVICE:
section = 'protection_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True)
NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False)
class MAILBOT:
mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot')
section = 'general'
INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0)
MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section,
'max_emails_per_protection_interval', fallback=50)
NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True)
NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False)
ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None)
section = 'smtp'
SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None)
SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None)
SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None)
SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587)
section = 'template/intruder'
INTRUDER_SUBJECT = mailbot_config.get(section, 'subject')
INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
section = 'template/admin'
ADMIN_SUBJECT = mailbot_config.get(section, 'subject')
ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')
class USAGE_LOGGING_SERVICE:
section = 'usage_logging_service'
default_path = '~/.config/TensorHive/logs/'
def full_path(path: str) -> str: # type: ignore
return str(PosixPath(path).expanduser())
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)
LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path))
LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2)
class JOB_SCHEDULING_SERVICE:
section = 'job_scheduling_service'
ENABLED = config.getboolean(section, 'enabled', fallback=True)
UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0)
STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0)
SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, "schedule_queued_jobs_when_free_mins", fallback=30)
class AUTH:
from datetime import timedelta
section = 'auth'
def config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore
'''
Parses value for option from string to a valid python list.
Fallback value is returned when anything goes wrong (e.g. option or value not present)
Example .ini file, function called with arguments: option='some_option', fallback=None
[some_section]
some_option = ['foo', 'bar']
Will return:
['foo', 'bar']
'''
import ast
try:
raw_arguments = config.get('auth', option)
parsed_arguments = ast.literal_eval(raw_arguments)
return parsed_arguments
except (configparser.Error, ValueError):
log.warning('Parsing [auth] config section failed for option "{}", using fallback value: {}'.format(
option, fallback))
return fallback
FLASK_JWT = {
'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'),
'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True),
'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']),
'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True),
'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes',
fallback=1)),
'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days',
fallback=1)),
'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers'])
}
| 2.234375 | 2 |
model.py | iz2late/baseline-seq2seq | 1 | 4589 | import random
from typing import Tuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch import Tensor
class Encoder(nn.Module):
def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
super().__init__()
self.input_dim = input_dim
self.emb_dim = emb_dim
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.dropout = dropout
self.embedding = nn.Embedding(input_dim, emb_dim)
self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True)
self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
embedded = self.dropout(self.embedding(src))
outputs, hidden = self.rnn(embedded)
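        # Shape sketch (assumes the single-layer bidirectional GRU created above,
        # with the default seq-first layout):
        #   embedded: [src_len, batch, emb_dim]
        #   outputs:  [src_len, batch, 2 * enc_hid_dim]  (forward and backward states)
        #   hidden:   [2, batch, enc_hid_dim]            (last forward / last backward)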
# output of bi-directional rnn should be concatenated
hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim):
super().__init__()
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.attn_in = (enc_hid_dim * 2) + dec_hid_dim
self.attn = nn.Linear(self.attn_in, attn_dim)
def forward(self, decoder_hidden, encoder_outputs):
src_len = encoder_outputs.shape[0]
repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
energy = torch.tanh(self.attn(torch.cat((
repeated_decoder_hidden,
encoder_outputs),
dim = 2)))
attention = torch.sum(energy, dim=2)
return F.softmax(attention, dim=1)
class Decoder(nn.Module):
def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
super().__init__()
self.emb_dim = emb_dim
self.enc_hid_dim = enc_hid_dim
self.dec_hid_dim = dec_hid_dim
self.output_dim = output_dim
self.dropout = dropout
self.attention = attention
self.embedding = nn.Embedding(output_dim, emb_dim)
self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs):
a = self.attention(decoder_hidden, encoder_outputs)
a = a.unsqueeze(1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
weighted_encoder_rep = torch.bmm(a, encoder_outputs)
weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2)
return weighted_encoder_rep
def forward(self, input, decoder_hidden, encoder_outputs):
input = input.unsqueeze(0)
embedded = self.dropout(self.embedding(input))
weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden,
encoder_outputs)
rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2)
output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0))
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted_encoder_rep = weighted_encoder_rep.squeeze(0)
output = self.out(torch.cat((output,
weighted_encoder_rep,
embedded), dim = 1))
return output, decoder_hidden.squeeze(0)
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.device = device
def forward(self, src, trg, teacher_forcing_ratio=0.5):
batch_size = src.shape[1]
max_len = trg.shape[0]
trg_vocab_size = self.decoder.output_dim
outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)
encoder_outputs, hidden = self.encoder(src)
# first input to the decoder is the <sos> token
output = trg[0,:]
for t in range(1, max_len):
output, hidden = self.decoder(output, hidden, encoder_outputs)
outputs[t] = output
teacher_force = random.random() < teacher_forcing_ratio
top1 = output.max(1)[1]
output = (trg[t] if teacher_force else top1)
return outputs
| 2.3125 | 2 |
ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py | xuyannus/Machine-Learning-Collection | 3,094 | 4590 | <filename>ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py<gh_stars>1000+
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
import numpy as np
import spacy
import random
from torch.utils.tensorboard import SummaryWriter # to print to tensorboard
from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint
spacy_ger = spacy.load("de")
spacy_eng = spacy.load("en")
def tokenize_ger(text):
return [tok.text for tok in spacy_ger.tokenizer(text)]
def tokenize_eng(text):
return [tok.text for tok in spacy_eng.tokenizer(text)]
german = Field(tokenize=tokenize_ger, lower=True, init_token="<sos>", eos_token="<eos>")
english = Field(
tokenize=tokenize_eng, lower=True, init_token="<sos>", eos_token="<eos>"
)
train_data, valid_data, test_data = Multi30k.splits(
exts=(".de", ".en"), fields=(german, english)
)
german.build_vocab(train_data, max_size=10000, min_freq=2)
english.build_vocab(train_data, max_size=10000, min_freq=2)
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
super(Encoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
def forward(self, x):
# x shape: (seq_length, N) where N is batch size
embedding = self.dropout(self.embedding(x))
# embedding shape: (seq_length, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding)
# outputs shape: (seq_length, N, hidden_size)
return hidden, cell
class Decoder(nn.Module):
def __init__(
self, input_size, embedding_size, hidden_size, output_size, num_layers, p
):
super(Decoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
self.fc = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden, cell):
# x shape: (N) where N is for batch size, we want it to be (1, N), seq_length
# is 1 here because we are sending in a single word and not a sentence
x = x.unsqueeze(0)
embedding = self.dropout(self.embedding(x))
# embedding shape: (1, N, embedding_size)
outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell))
# outputs shape: (1, N, hidden_size)
predictions = self.fc(outputs)
# predictions shape: (1, N, length_target_vocabulary) to send it to
# loss function we want it to be (N, length_target_vocabulary) so we're
# just gonna remove the first dim
predictions = predictions.squeeze(0)
return predictions, hidden, cell
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, source, target, teacher_force_ratio=0.5):
batch_size = source.shape[1]
target_len = target.shape[0]
target_vocab_size = len(english.vocab)
outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)
hidden, cell = self.encoder(source)
# Grab the first input to the Decoder which will be <SOS> token
x = target[0]
for t in range(1, target_len):
# Use previous hidden, cell as context from encoder at start
output, hidden, cell = self.decoder(x, hidden, cell)
# Store next output prediction
outputs[t] = output
# Get the best word the Decoder predicted (index in the vocabulary)
best_guess = output.argmax(1)
# With probability of teacher_force_ratio we take the actual next word
# otherwise we take the word that the Decoder predicted it to be.
# Teacher Forcing is used so that the model gets used to seeing
            # similar inputs at training and testing time; if teacher forcing is 1
            # then inputs at test time might be completely different from what the
            # network is used to. This was a long comment.
x = target[t] if random.random() < teacher_force_ratio else best_guess
return outputs
### We're ready to define everything we need for training our Seq2Seq model ###
# Training hyperparameters
num_epochs = 100
learning_rate = 0.001
batch_size = 64
# Model hyperparameters
load_model = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input_size_encoder = len(german.vocab)
input_size_decoder = len(english.vocab)
output_size = len(english.vocab)
encoder_embedding_size = 300
decoder_embedding_size = 300
hidden_size = 1024 # Needs to be the same for both RNN's
num_layers = 2
enc_dropout = 0.5
dec_dropout = 0.5
# Tensorboard to get nice loss plot
writer = SummaryWriter(f"runs/loss_plot")
step = 0
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_within_batch=True,
sort_key=lambda x: len(x.src),
device=device,
)
encoder_net = Encoder(
input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout
).to(device)
decoder_net = Decoder(
input_size_decoder,
decoder_embedding_size,
hidden_size,
output_size,
num_layers,
dec_dropout,
).to(device)
model = Seq2Seq(encoder_net, decoder_net).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
pad_idx = english.vocab.stoi["<pad>"]
criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
if load_model:
load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)
sentence = "ein boot mit mehreren männern darauf wird von einem großen pferdegespann ans ufer gezogen."
for epoch in range(num_epochs):
print(f"[Epoch {epoch} / {num_epochs}]")
checkpoint = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict()}
save_checkpoint(checkpoint)
model.eval()
translated_sentence = translate_sentence(
model, sentence, german, english, device, max_length=50
)
print(f"Translated example sentence: \n {translated_sentence}")
model.train()
for batch_idx, batch in enumerate(train_iterator):
# Get input and targets and get to cuda
inp_data = batch.src.to(device)
target = batch.trg.to(device)
# Forward prop
output = model(inp_data, target)
# Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss
        # doesn't take input in that form. For example if we have MNIST we want the
        # output to be (N, 10) and targets just (N). Here we can view it in a similar
        # way: we have output_words * batch_size predictions that we want to send into
        # our cost function, so we need to do some reshaping. While we're at it,
        # let's also remove the start token.
output = output[1:].reshape(-1, output.shape[2])
target = target[1:].reshape(-1)
optimizer.zero_grad()
loss = criterion(output, target)
# Back prop
loss.backward()
# Clip to avoid exploding gradient issues, makes sure grads are
# within a healthy range
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
# Gradient descent step
optimizer.step()
# Plot to tensorboard
writer.add_scalar("Training loss", loss, global_step=step)
step += 1
score = bleu(test_data[1:100], model, german, english, device)
print(f"Bleu score {score*100:.2f}")
| 2.3125 | 2 |
gail_chatbot/light/sqil/light_sentence_imitate_mixin.py | eublefar/gail_chatbot | 0 | 4591 | from typing import Dict, Any, List
import string
from parlai.core.agents import Agent
from parlai.core.message import Message
from random import sample
import pathlib
path = pathlib.Path(__file__).parent.absolute()
class LightImitateMixin(Agent):
"""Abstract class that handles passing expert trajectories alongside self-play sampling
"""
def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any] = None):
self.id = "LightChatbotSelfPlay"
self.train_step = 0
self.self_speaker_token = "<speaker_self>"
self.other_speaker_token = "<speaker_other>"
def act(self):
raise NotImplementedError()
def batch_act(self, observations):
self.train_step += 1
# Add generated histories to data ones
imitate = []
sample = []
for i, observation in enumerate(observations):
sample.extend(
[
(dialog[0], dialog[1][:-1])
for dialog in observation["text"] if len(dialog[1]) > 0
]
)
imitate.extend(
[
dialog
for dialog in observation["text"] if len(dialog[1]) > 0
]
)
self.batch_imitate(imitate)
utterances = self.batch_sample(sample)
if (
self.train_step % self.episode_num_dialog_dump == 0
) and self.train_step != 0:
self.checkpoint([sample, utterances])
return [{"id": self.id} for _ in observations]
def batch_imitate(self, dialogs):
"""Implement sampling utterances and memorization here"""
pass
def batch_sample(self, dialogs) -> List[str]:
"""Implement update here"""
pass
def batch_update(self):
"""Update weights here"""
pass
def _update_histories(self, utterances, other=False):
for i in range(len(utterances)):
history = self.histories[i]
history.append(
(self.self_speaker_token if not other else self.other_speaker_token)
+ utterances[i]
)
self.histories[i] = history
def _convert_history_to_other(self, history):
history = [
turn.replace(self.self_speaker_token, self.other_speaker_token)
if self.self_speaker_token in turn
else turn.replace(self.other_speaker_token, self.self_speaker_token)
for turn in history
]
return history
| 2.484375 | 2 |
pytudes/_2021/educative/grokking_the_coding_interview/fast_and_slow_pointers/_1__linked_list_cycle__easy.py | TeoZosa/pytudes | 1 | 4592 | <reponame>TeoZosa/pytudes
"""https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D
Categories:
- Binary
- Bit Manipulation
- Blind 75
See Also:
- pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py
"""
from pytudes._2021.utils.linked_list import (
ListNode,
NodeType,
convert_list_to_linked_list,
)
def has_cycle(head: NodeType) -> bool:
"""
Args:
head: head of a singly-linked list of nodes
Returns:
whether or not the linked list has a cycle
Examples:
>>> has_cycle(None)
False
>>> head = ListNode("self-edge")
>>> head.next = head
>>> has_cycle(head)
True
>>> head = convert_list_to_linked_list([1,2,3,4,5,6])
>>> has_cycle(head)
False
>>> head.next.next.next.next.next.next = head.next.next
>>> has_cycle(head)
True
>>> head.next.next.next.next.next.next = head.next.next.next
>>> has_cycle(head)
True
"""
slow = fast = head
while fast is not None and fast.next is not None: # since fast ≥ slow
slow = slow.next
fast = fast.next.next
if slow == fast:
return True # found the cycle
else:
return False
def main():
head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6])
print("LinkedList has cycle: " + str(has_cycle(head)))
head.next.next.next.next.next.next = head.next.next
print("LinkedList has cycle: " + str(has_cycle(head)))
head.next.next.next.next.next.next = head.next.next.next
print("LinkedList has cycle: " + str(has_cycle(head)))
main()
| 3.96875 | 4 |
httpd.py | whtt8888/TritonHTTPserver | 2 | 4593 | <filename>httpd.py
import sys
import os
import socket
import time
import threading
class MyServer:
def __init__(self, port, doc_root):
self.port = port
self.doc_root = doc_root
self.host = '127.0.0.1'
self.res_200 = "HTTP/1.1 200 OK\r\nServer: Myserver 1.0\r\n"
self.res_404 = "HTTP/1.1 404 NOT FOUND\r\nServer: Myserver 1.0\r\n\r\n"
self.res_400 = "HTTP/1.1 400 Client Error\r\nServer: Myserver 1.0\r\n\r\n"
self.res_close = "HTTP/1.1 Connection:close\r\nServer: Myserver 1.0\r\n\r\n"
# map request into dict
def req_info(self, request):
# 400 malform
if request[-4:] != '\r\n\r\n':
info = {'url': '400malform'}
return info
headers = request.splitlines()
firstline = headers.pop(0)
try:
(act, url, version) = firstline.split()
except ValueError:
info = {'url': '400malform'}
return info
info = {'act': act, 'url': url, 'version': version}
for h in headers:
h = h.split(': ')
if len(h) < 2:
continue
field = h[0]
value = h[1]
info[field] = value
# mapping url, return 404 escape or absolute filename
# judge whether escape
path = ''
x = url.split('/')
i = 0
while i < len(x):
if '' in x:
x.remove('')
if i < 0 or x[0] == '..' or len(x) == 0: # path escape from file root
info['url'] = '404escape'
return info
if i < len(x) and x[i] == '..':
x.remove(x[i])
x.remove(x[i - 1])
i -= 1
else:
i += 1
# map index.html
if len(x[-1].split('.')) < 2:
x.append('index.html')
for d in range(len(x)):
path = path + '/' + x[d]
info['url'] = os.path.realpath(self.doc_root + path)
return info
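    # Illustrative mapping (doc_root and header values are placeholders): a request
    #   "GET /index.html HTTP/1.1\r\nHost: localhost\r\nUser-Agent: curl\r\n\r\n"
    # is mapped to roughly
    #   {'act': 'GET', 'version': 'HTTP/1.1', 'Host': 'localhost',
    #    'User-Agent': 'curl', 'url': '<doc_root>/index.html'}
    # while a malformed request yields {'url': '400malform'} and a path escaping
    # doc_root yields {'url': '404escape'}.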
# generate response
def res_gen(self, reqinfo):
path = reqinfo['url']
# 404 escape
if path == '404escape':
return self.res_404
# 400 malform req
if path == "400malform":
return self.res_400
try:
reqinfo['Host'] and reqinfo['User-Agent']
except KeyError:
return self.res_400
# 404 not found
if not os.path.isfile(path):
return self.res_404
# a valid 200 req
else:
res = self.res_200
res += "Last-Modified: {}\r\n".format(time.ctime(os.stat(path).st_mtime))
with open(path, "rb") as f:
data = f.read()
res += "Content-Length: {}\r\n".format(len(data))
if path.split('.')[-1] == 'html':
res += 'Content-Type: text/html\r\n\r\n'
res = res + str(data, 'utf-8')
else: # for jpg and png
if path.split('.')[-1] == 'png':
res += 'Content-Type: image/png\r\n\r\n'
else:
res += 'Content-Type: image/jpeg\r\n\r\n'
res = res + str(data)
return res
def createsocket(conn, addr):
with conn:
try:
conn.settimeout(5)
except socket.timeout:
conn.close()
# print('closed')
# print('Connected by', addr)
while True:
req = conn.recv(1024).decode()
if not req:
break
info = server.req_info(req)
msg = server.res_gen(info).encode()
conn.sendall(msg)
# print("msg send finished")
# msg = server.res_close.encode()
# conn.sendall(msg)
break
if __name__ == '__main__':
input_port = int(sys.argv[1])
input_doc_root = sys.argv[2]
server = MyServer(input_port, input_doc_root)
# Add code to start your server here
threads = []
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((server.host, server.port))
s.listen()
while True:
conn, addr = s.accept()
            t = threading.Thread(target=createsocket, args=(conn, addr))
t.start()
threads.append(t)
for t in threads:
t.join()
| 2.8125 | 3 |
metric/metric.py | riven314/ENetDepth_TimeAnlysis_Tmp | 0 | 4594 | <gh_stars>0
class Metric(object):
"""Base class for all metrics.
From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py
"""
def reset(self):
pass
def add(self):
pass
def value(self):
pass
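    # Hypothetical concrete metric built on this interface (illustration only,
    # not part of the package):
    #
    #     class AverageValueMetric(Metric):
    #         def __init__(self):
    #             self.sum, self.n = 0.0, 0
    #         def reset(self):
    #             self.sum, self.n = 0.0, 0
    #         def add(self, value):
    #             self.sum += value
    #             self.n += 1
    #         def value(self):
    #             return self.sum / self.n if self.n else 0.0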
| 2.265625 | 2 |
pf_pweb_sourceman/task/git_repo_man.py | problemfighter/pf-pweb-sourceman | 0 | 4595 | from git import Repo
from pf_pweb_sourceman.common.console import console
from pf_py_file.pfpf_file_util import PFPFFileUtil
class GitRepoMan:
def get_repo_name_from_url(self, url: str):
if not url:
return None
last_slash_index = url.rfind("/")
last_suffix_index = url.rfind(".git")
if last_suffix_index < 0:
last_suffix_index = len(url)
if last_slash_index < 0 or last_suffix_index <= last_slash_index:
raise Exception("Invalid repo url {}".format(url))
return url[last_slash_index + 1:last_suffix_index]
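    # Illustrative behaviour (URLs are placeholders):
    #   get_repo_name_from_url("https://github.com/org/repo.git") -> "repo"
    #   get_repo_name_from_url("https://github.com/org/repo")     -> "repo"
    #   get_repo_name_from_url(None)                               -> None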
def clone_or_pull_project(self, path, url, branch):
repo_name = self.get_repo_name_from_url(url)
if not repo_name:
raise Exception("Invalid repo")
if not PFPFFileUtil.is_exist(path):
console.success("Cloning project: " + repo_name + ", Branch: " + branch)
Repo.clone_from(url, branch=branch, to_path=path)
else:
console.success(repo_name + " Taking pull...")
repo = Repo(path)
repo.git.checkout(branch)
origin = repo.remotes.origin
origin.pull()
| 2.765625 | 3 |
tool/remote_info.py | shanmukmichael/Asset-Discovery-Tool | 0 | 4596 | <filename>tool/remote_info.py
import socket
import paramiko
import json
Hostname = '172.16.17.32'
Username = 'ec2-user'
key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem'
def is_connected():
try:
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection(("8.8.8.8", 53))
return "conneted to the Internet!"
except OSError:
pass
return "Please Connect to the Internet!"
is_connected()
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=Hostname, username=Username, key_filename=key)
except paramiko.AuthenticationException:
print("Failed to connect to {} due to wrong username/password".format(Hostname))
exit(1)
except:
print("Failed to connect to {} ".format(Hostname))
exit(2)
# commands
_, stdout_1, _ = ssh.exec_command("hostname")
_, stdout_2, _ = ssh.exec_command("hostname -I | awk '{print $1}'")
_, stdout_3, _ = ssh.exec_command("cat /sys/class/net/eth0/address")
_, stdout_4, _ = ssh.exec_command(
"awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release".format('"NAME"'))
_, stdout_5, _ = ssh.exec_command("whoami")
_, stdout_6, _ = ssh.exec_command("last -F")
_, stdout_7, _ = ssh.exec_command("netstat -tnpa | grep 'ESTABLISHED.*sshd'")
#_, stdout_8, _ = ssh.exec_command("sudo {}/24".format())
# egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' --IP-address
# ---------------------------------
def remote_data_1():
output_1 = stdout_1.readlines()
output_2 = stdout_2.readlines()
output_3 = stdout_3.readlines()
output_4 = stdout_4.readlines()
output_5 = stdout_5.readlines()
remote_data_1 = {
'Hostname': '',
'IP': '',
'MAC': '',
'OS': '',
'Currentuser': '',
}
remote_data_1['Hostname'] = output_1[0].strip('\n')
remote_data_1['IP'] = output_2[0].strip('\n')
remote_data_1['MAC'] = output_3[0].strip('\n')
remote_data_1['OS'] = output_4[0][1:-1].strip('\"')
remote_data_1['Currentuser'] = output_5[0].strip('\n')
return json.dumps(remote_data_1, indent=4)
# ----------------------------------
def remote_data_2_():
output = stdout_6.readlines()
data_ = []
filter_ = []
remote_data_2 = {
'Hostname': [],
'IP': [],
'MAC': [],
'Lastseen': [],
'Status': [],
}
for i in output:
data_.append(i.split(' '))
for i in data_:
filter_.append(list(filter(None, i)))
for i in range(len(filter_)-3):
remote_data_2['Hostname'].append(filter_[i][0])
remote_data_2['IP'].append(filter_[i][2])
remote_data_2['MAC'].append('not found')
remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8]))
if 'logged' in filter_[i][9]:
remote_data_2['Status'].append('Active')
else:
remote_data_2['Status'].append('Inactive')
# ssh.close()
return remote_data_2
| 2.75 | 3 |
hvac/api/secrets_engines/kv_v2.py | Famoco/hvac | 0 | 4597 | <filename>hvac/api/secrets_engines/kv_v2.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""KvV2 methods module."""
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
DEFAULT_MOUNT_POINT = 'secret'
class KvV2(VaultApiBase):
"""KV Secrets Engine - Version 2 (API).
Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html
"""
def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
"""Configure backend level settings that are applied to every key in the key-value store.
Supported methods:
POST: /{mount_point}/config. Produces: 204 (empty body)
:param max_versions: The number of versions to keep per key. This value applies to all keys, but a key's
metadata setting can overwrite this value. Once a key has more than the configured allowed versions the
oldest version will be permanently deleted. Defaults to 10.
:type max_versions: int
:param cas_required: If true all keys will require the cas parameter to be set on all write requests.
:type cas_required: bool
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'max_versions': max_versions,
}
if cas_required is not None:
params['cas_required'] = cas_required
api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
)
def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT):
"""Read the KV Version 2 configuration.
Supported methods:
            GET: /{mount_point}/config. Produces: 200 application/json
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url(
'/v1/{mount_point}/config',
mount_point=mount_point,
)
response = self._adapter.get(url=api_path)
return response.json()
def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):
"""Retrieve the secret at the specified location.
Supported methods:
GET: /{mount_point}/data/{path}. Produces: 200 application/json
:param path: Specifies the path of the secret to read. This is specified as part of the URL.
:type path: str | unicode
:param version: Specifies the version to return. If not set the latest version is returned.
:type version: int
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
params = {}
if version is not None:
params['version'] = version
api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
response = self._adapter.get(
url=api_path,
params=params,
)
return response.json()
def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT):
"""Create a new version of a secret at the specified location.
If the value does not yet exist, the calling token must have an ACL policy granting the create capability. If
the value already exists, the calling token must have an ACL policy granting the update capability.
Supported methods:
POST: /{mount_point}/data/{path}. Produces: 200 application/json
:param path: Path
:type path: str | unicode
:param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be allowed. If set
to 0 a write will only be allowed if the key doesn't exist. If the index is non-zero the write will only be
allowed if the key's current version matches the version specified in the cas parameter.
:type cas: int
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
params = {
'options': {},
'data': secret,
}
if cas is not None:
params['options']['cas'] = cas
api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json()
def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):
"""Set or update data in the KV store without overwriting.
:param path: Path
:type path: str | unicode
:param secret: The contents of the "secret" dict will be stored and returned on read.
:type secret: dict
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the create_or_update_secret request.
:rtype: dict
"""
# First, do a read.
try:
current_secret_version = self.read_secret_version(
path=path,
mount_point=mount_point,
)
except exceptions.InvalidPath:
raise exceptions.InvalidPath('No value found at "{path}"; patch only works on existing data.'.format(path=path))
# Update existing secret dict.
patched_secret = current_secret_version['data']['data']
patched_secret.update(secret)
# Write back updated secret.
return self.create_or_update_secret(
path=path,
cas=current_secret_version['data']['metadata']['version'],
secret=patched_secret,
mount_point=mount_point,
)
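    # Example (illustrative): patch a single key of an existing secret, e.g.
    #   client.secrets.kv.v2.patch(path='app/config', secret={'timeout': '30'})
    # Existing keys at 'app/config' are preserved; the write is guarded by the
    # check-and-set version obtained from the read performed above.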
def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Issue a soft delete of the secret's latest version at the specified location.
This marks the version as deleted and will stop it from being returned from reads, but the underlying data will
not be removed. A delete can be undone using the undelete path.
Supported methods:
DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete. This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)
return self._adapter.delete(
url=api_path,
)
def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
"""Issue a soft delete of the specified versions of the secret.
This marks the versions as deleted and will stop them from being returned from reads,
but the underlying data will not be removed. A delete can be undone using the
undelete path.
Supported methods:
POST: /{mount_point}/delete/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete. This is specified as part of the URL.
:type path: str | unicode
:param versions: The versions to be deleted. The versioned data will not be deleted, but it will no longer be
returned in normal get requests.
        :type versions: list of int
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if not isinstance(versions, list) or len(versions) == 0:
error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
versions=versions
)
raise exceptions.ParamValidationError(error_msg)
params = {
'versions': versions,
}
api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path)
return self._adapter.post(
url=api_path,
json=params,
)
def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
"""Undelete the data for the provided version and path in the key-value store.
This restores the data, allowing it to be returned on get requests.
Supported methods:
POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to undelete. This is specified as part of the URL.
:type path: str | unicode
:param versions: The versions to undelete. The versions will be restored and their data will be returned on
normal get requests.
:type versions: list of int
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if not isinstance(versions, list) or len(versions) == 0:
error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
versions=versions
)
raise exceptions.ParamValidationError(error_msg)
params = {
'versions': versions,
}
api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path)
return self._adapter.post(
url=api_path,
json=params,
)
def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
"""Permanently remove the specified version data and numbers for the provided path from the key-value store.
Supported methods:
POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to destroy.
This is specified as part of the URL.
:type path: str | unicode
:param versions: The versions to destroy. Their data will be
permanently deleted.
:type versions: list of int
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if not isinstance(versions, list) or len(versions) == 0:
error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
versions=versions
)
raise exceptions.ParamValidationError(error_msg)
params = {
'versions': versions,
}
api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path)
return self._adapter.post(
url=api_path,
json=params,
)
def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Return a list of key names at the specified location.
Folders are suffixed with /. The input must be a folder; list on a file will not return a value. Note that no
policy-based filtering is performed on keys; do not encode sensitive information in key names. The values
themselves are not accessible via this command.
Supported methods:
LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json
:param path: Specifies the path of the secrets to list. This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)
response = self._adapter.list(
url=api_path,
)
return response.json()
def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Retrieve the metadata and versions for the secret at the specified path.
Supported methods:
GET: /{mount_point}/metadata/{path}. Produces: 200 application/json
:param path: Specifies the path of the secret to read. This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)
response = self._adapter.get(
url=api_path,
)
return response.json()
def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
"""Updates the max_versions of cas_required setting on an existing path.
Supported methods:
POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body)
:param path: Path
:type path: str | unicode
:param max_versions: The number of versions to keep per key. If not set, the backend's configured max version is
used. Once a key has more than the configured allowed versions the oldest version will be permanently
deleted.
:type max_versions: int
:param cas_required: If true the key will require the cas parameter to be set on all write requests. If false,
the backend's configuration will be used.
:type cas_required: bool
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {}
if max_versions is not None:
params['max_versions'] = max_versions
if cas_required is not None:
if not isinstance(cas_required, bool):
error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required))
raise exceptions.ParamValidationError(error_msg)
params['cas_required'] = cas_required
api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)
return self._adapter.post(
url=api_path,
json=params,
)
def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT):
"""Delete (permanently) the key metadata and all version data for the specified key.
All version history will be removed.
Supported methods:
DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body)
:param path: Specifies the path of the secret to delete. This is specified as part of the URL.
:type path: str | unicode
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)
return self._adapter.delete(
url=api_path,
)
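# Usage sketch (illustrative, not part of the hvac module): exercising the KvV2
# methods above through an hvac.Client. The Vault address, token and secret
# path below are placeholders, not values from this module.
if __name__ == '__main__':
    import hvac
    client = hvac.Client(url='http://127.0.0.1:8200', token='s.placeholder-token')
    # Write a secret, then read back its latest version.
    client.secrets.kv.v2.create_or_update_secret(path='app/config', secret={'api_key': 'example'})
    read_response = client.secrets.kv.v2.read_secret_version(path='app/config')
    print(read_response['data']['data'])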
| 2.53125 | 3 |
android/install-all.py | SaschaWillems/vulkan_slim | 28 | 4598 | # Install all examples to connected device(s)
import subprocess
import sys
answer = input("Install all vulkan examples to attached device, this may take some time! (Y/N)").lower() == 'y'
if answer:
    BUILD_ARGUMENTS = ""
    for arg in sys.argv[1:]:
        if arg == "-validation":
            BUILD_ARGUMENTS += "-validation"
    # Use split() rather than split(' ') so an empty BUILD_ARGUMENTS does not
    # produce a spurious empty command-line argument.
    if subprocess.call(("python build-all.py -deploy %s" % BUILD_ARGUMENTS).split()) != 0:
        print("Error: Not all examples may have been installed!")
        sys.exit(-1)
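# Example invocation (illustrative): deploy all examples with validation layers enabled.
#   python install-all.py -validation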
| 2.59375 | 3 |
main.py | juangallostra/moonboard | 0 | 4599 | from generators.ahoughton import AhoughtonGenerator
from render_config import RendererConfig
from problem_renderer import ProblemRenderer
from moonboard import get_moonboard
from adapters.default import DefaultProblemAdapter
from adapters.crg import CRGProblemAdapter
from adapters.ahoughton import AhoughtonAdapter
import json
def main():
# Create Renderer
config = RendererConfig()
renderer = ProblemRenderer(
get_moonboard(2017),
DefaultProblemAdapter(),
config
)
crg_renderer = ProblemRenderer(
get_moonboard(2017),
CRGProblemAdapter(),
config
)
ahoughton_renderer_2016 = ProblemRenderer(
get_moonboard(2016),
AhoughtonAdapter(),
config
)
ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe')
ahoughton_renderer_2017 = ProblemRenderer(
get_moonboard(2017),
AhoughtonAdapter(),
config
)
ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe')
# Load data
with open('data/problems.json', 'r') as f:
problems = json.load(f)
renderer.render_problem(problems['339318'], with_info=True)
with open('data/crg.json', 'r') as f:
crg_problems = json.load(f)
crg_renderer.render_problem(crg_problems['1'])
# Ahoughton generator and adapter test
# 2016
problem = ahoughton_generator_2016.generate()
ahoughton_renderer_2016.render_problem(problem)
# 2017
problem = ahoughton_generator_2017.generate()
ahoughton_renderer_2017.render_problem(problem)
if __name__ == "__main__":
main()
| 2.203125 | 2 |