max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
py/hashmap.py | maoandmoon/dataStructures | 1 | 12794451 | from .linkedlist import LinkedList
class Hashmap(object):
"""
character holding hash map
"""
def __init__(self, hash_fn, length=100):
self.num_values = 0
assert hasattr(hash_fn, '__call__'), 'You must provide a hash function'
self._buckets = [None] * length
self.hash_len = length
self.hash_fn = hash_fn
# Max items per bucket
self.change_len = length / 5
def _hash(self, key):
return self.hash_fn(key) % self.hash_len
def put(self, key, val):
pos = self._hash(key)
bucket = self._buckets[pos]
if bucket is None:
self._buckets[pos] = bucket = LinkedList()
bucket.put(key, val)
else:
bucket.put(key, val)
if len(bucket) >= self.change_len:
# print('growing', 'num buckets: ', len(self._buckets))
self._grow()
def _grow(self):
# Double size of buckets
self.hash_len = self.hash_len * 2
# New max len for buckets
self.change_len = self.hash_len / 5
# new bucket holder
oldBuckets = self._buckets
self._buckets = [None] * self.hash_len
# Iterate through all buckets
# and reinsert key=>values
for bucket in oldBuckets:
if bucket is None:
continue
for (key, val) in bucket:
self.put(key, val)
def get(self, key):
pos = self._hash(key)
bucket = self._buckets[pos]
if bucket is None:
return None
return bucket.get(key)
def delete(self, key):
    """
    Delete a key from the hashmap.
    Returns the deleted value if the key exists, otherwise None.
    """
    pos = self._hash(key)
    bucket = self._buckets[pos]
    if bucket is None:
        return None
    val = bucket.get(key)
    if val is None:
        return None
    # Rebuild the bucket without this key instead of dropping the whole
    # bucket, which would also delete any other keys that hash here.
    # Only LinkedList operations already used elsewhere are needed:
    # iteration (see _grow), put and len.
    new_bucket = LinkedList()
    for (k, v) in bucket:
        if k != key:
            new_bucket.put(k, v)
    self._buckets[pos] = new_bucket if len(new_bucket) else None
    self.num_values -= 1
    return val
def _shrink(self):
# length = self.hash_len
pass
def __repr__(self):
return '<Hashmap %r>' % self._buckets
def __len__(self):
n = 0
for bucket in self._buckets:
if not bucket:
continue
n += len(bucket)
return n
def get_num_empty_buckets(self):
n = 0
for bucket in self._buckets:
if bucket is None or len(bucket) == 0:
n += 1
return n
def get_longest_bucket(self):
longest = 0
b = None
for bucket in self._buckets:
if bucket is None: continue
l = len(bucket)
if longest < l:
longest = l
b = bucket
return longest
def get_shortest_bucket(self):
shortest = 0
b = None
for bucket in self._buckets:
if bucket is None:
shortest = 0
b = None
break
l = len(bucket)
if shortest == 0:
shortest = l
if shortest >= l:
shortest = l
b = bucket
return shortest
| 3.796875 | 4 |
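The Hashmap above resolves collisions by chaining entries in a LinkedList and doubles its bucket array once any bucket reaches length/5 entries. A minimal, self-contained sketch of the same separate-chaining-with-growth idea, using plain Python lists as buckets so it runs without the repo's linkedlist module (class and variable names here are illustrative, not the repo's):

```python
class ChainedMap:
    """Separate chaining; grow the table when any bucket gets too long."""

    def __init__(self, hash_fn, length=10):
        self.hash_fn = hash_fn
        self.hash_len = length
        self.change_len = max(2, length // 5)  # max items per bucket
        self._buckets = [None] * length

    def _hash(self, key):
        return self.hash_fn(key) % self.hash_len

    def put(self, key, val):
        pos = self._hash(key)
        if self._buckets[pos] is None:
            self._buckets[pos] = []
        bucket = self._buckets[pos]
        for pair in bucket:            # update in place if the key exists
            if pair[0] == key:
                pair[1] = val
                return
        bucket.append([key, val])
        if len(bucket) >= self.change_len:
            self._grow()

    def get(self, key):
        bucket = self._buckets[self._hash(key)]
        for k, v in (bucket or []):
            if k == key:
                return v
        return None

    def _grow(self):
        old = self._buckets
        self.hash_len = self.hash_len * 2
        self.change_len = max(2, self.hash_len // 5)
        self._buckets = [None] * self.hash_len
        for bucket in old:             # reinsert every key => value pair
            for k, v in (bucket or []):
                self.put(k, v)


m = ChainedMap(hash_fn=hash)
m.put("a", 1)
m.put("b", 2)
assert m.get("a") == 1 and m.get("missing") is None
```

The growth rule mirrors change_len in the original: a table of n buckets tolerates roughly n/5 entries per bucket before doubling.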
tests/base_test.py | HelloMorrisMoss/mahlo_popup | 0 | 12794452 | """Parent class for each non-unit test. Creates and removes a new test table for each test."""
# TODO: integrate creating/removing a database
from unittest import TestCase
from flask_server_files.flask_app import app
from flask_server_files.sqla_instance import fsa
# from flask.ext.testing import TestCase
class BaseTest(TestCase):
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://tester:tester@localhost:5432/test'
app.testing = True
self.app_context = app.app_context
self.app = app.test_client()
with self.app_context():
fsa.init_app(app)
fsa.create_all()
def tearDown(self):
with self.app_context():
fsa.session.remove()
fsa.drop_all()
| 2.6875 | 3 |
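The BaseTest above points the app at a Postgres test database and rebuilds the schema around every test. A sketch of the same setUp/tearDown pattern against an in-memory SQLite database, so it can run without the project's flask_server_files package (assumes Flask and Flask-SQLAlchemy are installed; db, the URI and SqliteBaseTest are illustrative names):

```python
from unittest import TestCase

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)  # registered once at import time, not in every setUp


class SqliteBaseTest(TestCase):
    def setUp(self):
        app.testing = True
        self.app = app.test_client()
        self.app_context = app.app_context
        with self.app_context():
            db.create_all()

    def tearDown(self):
        with self.app_context():
            db.session.remove()
            db.drop_all()
```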
playground/service.py | tairan/playground-python | 0 | 12794453 | <reponame>tairan/playground-python<filename>playground/service.py
import datetime
import pytz
from .models import (
Account
)
class AccountService():
def __init__(self, session):
self.session = session
def sign_in(self, name):
user = self.session.query(Account).filter_by(name=name).first()
if user:
user.last_signed_at = datetime.datetime.now(tz=pytz.timezone('UTC'))
else:
raise Exception('user `{0}` does not exist.'.format(name))
def create_new_user(self, name):
if not self.user_exists(name):
act = Account(name=name)
self.session.add(act)
self.session.commit()
return act
else:
raise Exception('user `{0}` already exist.'.format(name))
def user_exists(self, name):
account = self.session.query(Account).filter_by(name = name).first()
return account is not None
| 3 | 3 |
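AccountService relies on the usual SQLAlchemy session API (query / filter_by / first / add / commit). A self-contained sketch of that pattern against an in-memory SQLite database; the Account model below is a stand-in for illustration, not the repo's playground.models.Account:

```python
import datetime

import pytz
from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Account(Base):
    __tablename__ = 'accounts'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)
    last_signed_at = Column(DateTime)


engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# create_new_user equivalent
session.add(Account(name='alice'))
session.commit()

# sign_in equivalent
user = session.query(Account).filter_by(name='alice').first()
user.last_signed_at = datetime.datetime.now(tz=pytz.timezone('UTC'))
session.commit()

# user_exists equivalent
print(session.query(Account).filter_by(name='alice').first() is not None)
```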
teamspirit/profiles/models.py | etienne86/oc_p13_team_spirit | 0 | 12794454 | <gh_stars>0
"""Contain the models related to the app ``profiles``."""
from django.db import models, transaction
from teamspirit.core.models import Address
from teamspirit.profiles.managers import (
PersonalManager,
RoleManager,
rename_id_file,
rename_medical_file,
)
from teamspirit.users.models import User
class Personal(models.Model):
"""Contain personal information."""
phone_number = models.CharField(
max_length=20,
verbose_name='Téléphone',
null=True,
blank=False,
default='',
)
address = models.ForeignKey(
to=Address,
on_delete=models.CASCADE,
null=False,
)
has_private_profile = models.BooleanField(
default=False,
verbose_name='Profil privé ?',
help_text='Si cette case est cochée, mes informations ne seront pas '
'visibles par les autres adhérents.',
)
id_file = models.FileField(
null=True,
blank=True,
verbose_name='Pièce d\'identité',
upload_to=rename_id_file,
)
medical_file = models.FileField(
null=True,
blank=True,
verbose_name='Certificat médical ou licence',
upload_to=rename_medical_file,
)
objects = PersonalManager()
def __str__(self):
user = User.objects.get(personal=self.id)
result = "Informations personnelles pour " + \
f"{user.first_name} {user.last_name}"
return result
class Role(models.Model):
"""Qualify user's role."""
is_member = models.BooleanField(
default=True,
verbose_name="Adhérent(e) de l'association"
)
is_secretary = models.BooleanField(
default=False,
verbose_name="Secrétariat"
)
is_treasurer = models.BooleanField(
default=False,
verbose_name="Trésorerie"
)
is_president = models.BooleanField(
default=False,
verbose_name="Présidence"
)
is_inactive = models.BooleanField(
default=False,
verbose_name="Non adhérent(e)"
)
objects = RoleManager()
def set_as_member(self):
"""Qualify the user as member."""
with transaction.atomic():
self.is_member = True
self.is_secretary = False
self.is_treasurer = False
self.is_president = False
self.is_inactive = False
def set_as_secretary(self):
"""Qualify the user as secretary."""
with transaction.atomic():
self.is_member = False
self.is_secretary = True
self.is_treasurer = False
self.is_president = False
self.is_inactive = False
def set_as_treasurer(self):
"""Qualify the user as treasurer."""
with transaction.atomic():
self.is_member = False
self.is_secretary = False
self.is_treasurer = True
self.is_president = False
self.is_inactive = False
def set_as_president(self):
"""Qualify the user as president."""
with transaction.atomic():
self.is_member = False
self.is_secretary = False
self.is_treasurer = False
self.is_president = True
self.is_inactive = False
def set_as_inactive(self):
"""Qualify the user as inactive."""
with transaction.atomic():
self.is_member = False
self.is_secretary = False
self.is_treasurer = False
self.is_president = False
self.is_inactive = True
| 2.375 | 2 |
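The five set_as_* methods above all follow one pattern: switch a single role flag on and the other four off. A small, framework-free sketch of a table-driven version of that pattern (the helper and FakeRole are illustrative, not part of the app; the real methods would still need to save() inside their atomic block):

```python
ROLE_FLAGS = ('is_member', 'is_secretary', 'is_treasurer', 'is_president', 'is_inactive')


def set_role(role, active_flag):
    """Set exactly one boolean role flag to True and all others to False."""
    if active_flag not in ROLE_FLAGS:
        raise ValueError('unknown role flag: %s' % active_flag)
    for flag in ROLE_FLAGS:
        setattr(role, flag, flag == active_flag)


class FakeRole:
    """Attribute-only stand-in for the Django Role model."""


role = FakeRole()
set_role(role, 'is_treasurer')
assert role.is_treasurer and not role.is_member
```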
command/httlemon.py | dev-lemon/httlemon | 1 | 12794455 | <gh_stars>1-10
import click
from client.client_request import client_request
@click.command()
@click.argument('http_verb')
@click.argument('url')
def httlemon(http_verb, url):
beautified_response = client_request(http_verb, url)
click.echo(beautified_response)
| 2.453125 | 2 |
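Small click commands like httlemon can be exercised without touching the network by driving them through click's CliRunner. A sketch with a trivial command standing in for httlemon, since client_request is not available here:

```python
import click
from click.testing import CliRunner


@click.command()
@click.argument('http_verb')
@click.argument('url')
def fake_httlemon(http_verb, url):
    # stand-in for client_request(http_verb, url)
    click.echo('{0} {1}'.format(http_verb.upper(), url))


runner = CliRunner()
result = runner.invoke(fake_httlemon, ['get', 'https://example.com'])
assert result.exit_code == 0
print(result.output)
```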
dbq/main.py | getyourguide/dbq | 6 | 12794456 | import os
import sys
import json
import atexit
from argparse import ArgumentParser
from shutil import get_terminal_size
from subprocess import Popen, PIPE
from textwrap import dedent
from pkg_resources import get_distribution
from databricks_dbapi import databricks
from tabulate import tabulate
MAX_ROWS = 100
HISTORY = os.path.join(os.path.expanduser('~'), '.dbq_history')
def read_credentials():
filename = os.path.join(
os.path.expanduser('~'), '.databricks-credentials.json'
)
try:
with open(filename) as f:
return json.load(f)
except FileNotFoundError:
print('Databricks credentials missing!', file=sys.stderr)
print(
dedent(
f'''\
Please set up {filename} as follows:
{{
"cluster": "A CLUSTER NAME",
"host": "dbc-????????-????.cloud.databricks.com",
"token": "YOUR API ACCESS TOKEN"
}}'''
),
file=sys.stderr,
)
sys.exit(1)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'query',
help='query to run, use - to read from stdin or omit for '
'interactive session',
metavar='QUERY',
nargs='?',
)
return parser.parse_args()
def render(cursor):
headers = [col[0] for col in cursor.description]
rows = cursor.fetchall()
table = tabulate(rows[:MAX_ROWS], headers)
return (
table
+ '\n\nshowing '
+ (
f'first {MAX_ROWS} rows'
if len(rows) > MAX_ROWS
else f'full result, {len(rows)} row(s)'
)
)
def get_text_size(text):
lines = text.split('\n')
column_count = max(map(len, lines))
line_count = len(lines)
return column_count, line_count
def page(output):
process = Popen(["less", "-R"], stdin=PIPE)
try:
process.communicate(output.encode("utf-8"))
except IOError:
pass
def display(text):
term_columns, term_lines = get_terminal_size()
text_columns, text_lines = get_text_size(text)
if (
text_lines + 2 <= term_lines and text_columns <= term_columns
) or not sys.stdout.isatty():
return print(text)
page(text)
def sanitize_query(query):
return query + f' LIMIT {MAX_ROWS + 1}'
def try_extract_error(exception):
try:
return exception.args[0].status.errorMessage
except Exception:
raise exception
def run_query(cursor, query):
try:
cursor.execute(sanitize_query(query))
except Exception as e:
print(try_extract_error(e), file=sys.stderr)
return
display(render(cursor))
def setup_readline():
import readline
try:
readline.read_history_file(HISTORY)
except FileNotFoundError:
pass
atexit.register(readline.write_history_file, HISTORY)
def run_interactive(cursor):
setup_readline()
print(get_distribution('dbq'))
print('running in interactive mode, go ahead and type some SQL...')
try:
while True:
query = input('> ')
if query:
run_query(cursor, query)
except (EOFError, KeyboardInterrupt):
print()
print('Bye!')
def run():
args = parse_args()
connection = databricks.connect(**read_credentials())
cursor = connection.cursor()
if args.query:
query = sys.stdin.read() if args.query == '-' else args.query
run_query(cursor, query)
else:
run_interactive(cursor)
| 2.8125 | 3 |
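A design detail worth noting: sanitize_query appends LIMIT MAX_ROWS + 1, one row more than render will show, so render can tell a truncated result from a complete one. A small sketch of that render logic decoupled from the Databricks cursor (assumes tabulate is installed; the sample rows are made up):

```python
from tabulate import tabulate

MAX_ROWS = 100


def render_rows(headers, rows):
    # rows may hold up to MAX_ROWS + 1 entries because the query used
    # LIMIT MAX_ROWS + 1; the extra row only signals truncation.
    table = tabulate(rows[:MAX_ROWS], headers)
    suffix = (
        'first {0} rows'.format(MAX_ROWS)
        if len(rows) > MAX_ROWS
        else 'full result, {0} row(s)'.format(len(rows))
    )
    return table + '\n\nshowing ' + suffix


print(render_rows(['id', 'name'], [[1, 'a'], [2, 'b']]))
```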
aiokraken/websockets/schemas/__init__.py | asmodehn/aiokraken | 0 | 12794457 | <reponame>asmodehn/aiokraken
# TODO : Marshmallow schemas : Possible optimizations
# - if input is a list ( and not a dict/ class instance), iterate on it, zipped with declared fields.
# - from an existing schema, have a way to "restrict" the field to a "smaller" field.
# A kind fo gradual typing i guess...
| 1.851563 | 2 |
pixel/tests.py | trevor-ngugi/pixel-collab | 0 | 12794458 | from django.test import TestCase
from .models import Place,Category,Image
# Create your tests here.
class PlaceTestClass(TestCase):
def setUp(self):
self.nairobi=Place(location='nairobi')
def test_instance(self):
self.assertTrue(isinstance(self.nairobi,Place))
def test_save_method(self):
self.nairobi.save_place()
locations=Place.objects.all()
self.assertTrue(len(locations)>0)
class CategoryTestClass(TestCase):
def setUp(self):
self.food=Category(name='food')
def test_instance(self):
self.assertTrue(isinstance(self.food,Category))
def test_save_method(self):
self.food.save_category()
categories=Category.objects.all()
self.assertTrue(len(categories)>0)
class ImageTestClass(TestCase):
def setUp(self):
self.nairobi=Place(location='nairobi')
self.nairobi.save()
self.food=Category(name='food')
self.food.save()
self.post=Image(image='imagepic',image_name='food pic',description='nice food',location=self.nairobi,category=self.food)
self.post.save()
def test_instance(self):
self.assertTrue(isinstance(self.post,Image))
def test_save_method(self):
self.post.save_image()
images=Image.objects.all()
self.assertTrue(len(images)>0)
def test_delete_image(self):
Image.delete_image(self.post.id)
images=Image.objects.all()
self.assertTrue(len(images)==0)
def test_update_image(self):
self.post.image='newimagepic'
self.post.save()
self.assertEqual(self.post.image,'newimagepic')
def test_get_image_by_id(self):
image=Image.get_image_by_id(self.post.id)
self.assertEqual(image,self.post)
| 2.75 | 3 |
apps/cli/diana-cli.py | derekmerck/DIANA | 9 | 12794459 | <reponame>derekmerck/DIANA
import logging
import click_log
import click
from diana.apis import *
from utils.merge_yaml_sources import merge_yaml_sources
from commands.orthanc import ofind, pull
from commands.splunk import sfind
from commands.watch import watch
from commands.mock import mock
from commands.hello import hello
@click.group()
@click.option('-s', '--services',
help='Services description (DIANA_SERVICES)',
required=False)
@click.option('-S', '--services_path',
help='Path to services file or directory (DIANA_SERVICES_PATH)',
type=click.Path(exists=True),
required=False)
@click_log.simple_verbosity_option()
# @click.option('-v', '--verbose', help='Verbose logging', is_flag=True, multiple=True)
@click.pass_context
def cli(ctx, services, services_path):
# Create services context
all_services = merge_yaml_sources(services, services_path)
ctx.ensure_object(dict)
ctx.obj['SERVICES'] = all_services
# print(len(verbose))
#
# if len(verbose) >= 3:
# logging.basicConfig(level=logging.DEBUG)
# logging.info("Setting super-verbose")
# elif len(verbose) == 2:
# logging.basicConfig(level=logging.DEBUG)
# # Reduce junk output
# logging.getLogger("requests").setLevel(logging.WARNING)
# logging.getLogger("urllib3").setLevel(logging.WARNING)
# logging.getLogger("diana.utils.gateway.requester").setLevel(logging.WARNING)
# elif len(verbose) == 1:
# logging.basicConfig(level=logging.WARN)
# logging.getLogger("requests").setLevel(logging.WARNING)
# logging.getLogger("urllib3").setLevel(logging.WARNING)
# logging.getLogger("diana.utils.gateway.requester").setLevel(logging.WARNING)
# else:
# logging.basicConfig(level=logging.ERROR)
# logging.getLogger("requests").setLevel(logging.ERROR)
# logging.getLogger("urllib3").setLevel(logging.ERROR)
# logging.getLogger("diana.utils.gateway.requester").setLevel(logging.ERROR)
@click.command()
@click.argument('endpoints', nargs=-1)
@click.pass_context
def status(ctx, endpoints):
"""Report status of ENDPOINTS"""
services = ctx.obj['SERVICES']
click.echo('Reporting endpoint status')
if len(endpoints) == 0:
endpoints = services.keys()
click.echo(endpoints)
for key in endpoints:
ep = Orthanc(**services[key])
click.echo(ep)
click.echo(ep.info())
@click.command()
@click.argument('oid')
@click.argument('source')
@click.argument('path', type=click.File())
@click.pass_context
def get(ctx, oid, source, path):
"""Get a dixel by OID from SOURCE Orthanc service and save to PATH"""
click.echo(get.__doc__)
services = ctx.obj['SERVICES']
S = Orthanc(**services.get(source))
dixel = S.get(oid)
D = DicomFile()
D.put(dixel, path=path)
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.argument('destination')
@click.pass_context
def put(ctx, path, destination):
"""Put dixels at PATH in DESTINATION Orthanc service"""
click.echo(put.__doc__)
services = ctx.obj['SERVICES']
S = DicomFile()
dixel = S.get(path)
D = Orthanc(**services.get(destination))
D.put(dixel)
@click.command()
@click.argument('dixel')
@click.argument('handler')
@click.argument('source')
@click.argument('destination')
@click.pass_context
def handle(ctx, dixel, handler, source, destination):
"""Retrieve a DIXEL from SOURCE, process it with HANDLER, and submit the result to DESTINATION"""
click.echo(handle.__doc__)
services = ctx.obj['SERVICES']
cli.add_command(status)
cli.add_command(get)
cli.add_command(ofind)
cli.add_command(pull)
cli.add_command(sfind)
cli.add_command(put)
cli.add_command(handle)
cli.add_command(watch)
cli.add_command(mock)
cli.add_command(hello)
if __name__ == '__main__':
cli(auto_envvar_prefix='DIANA') | 1.921875 | 2 |
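diana-cli builds a shared services dictionary in the group callback and hands it to every subcommand through ctx.obj. A minimal, self-contained sketch of that click pattern (the service entry is an invented placeholder, not DIANA's real configuration):

```python
import click


@click.group()
@click.option('-s', '--services', required=False)
@click.pass_context
def cli(ctx, services):
    # Build the shared context once; subcommands read it back via ctx.obj.
    ctx.ensure_object(dict)
    ctx.obj['SERVICES'] = {'orthanc-main': {'host': 'localhost'}}


@cli.command()
@click.argument('endpoints', nargs=-1)
@click.pass_context
def status(ctx, endpoints):
    """Report status of ENDPOINTS"""
    services = ctx.obj['SERVICES']
    for key in endpoints or services:
        click.echo('{0}: {1}'.format(key, services.get(key)))


if __name__ == '__main__':
    cli(auto_envvar_prefix='DIANA')
```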
ghwiz/ghwiz.py | nerdralph/h2k | 0 | 12794460 | <filename>ghwiz/ghwiz.py
#!/usr/bin/python
# <NAME> 2021, 2022
# Greener Homes wizard: creates H2K house models from templates
import photos
import math, os, sys
import xml.etree.ElementTree as ET
FT_PER_M = 3.28084
SF_PER_SM = FT_PER_M ** 2
if len(sys.argv) < 6:
print(sys.argv[0], "fileid template.h2k afl-height fperim farea [ta_delta] [afl_perim]")
print("f = foundation, t = top floor, outside measurements")
sys.exit()
args = sys.argv
fileid = args.pop(1)
template = args.pop(1)
# wall height in metres
wall_height_m = float(args.pop(1))/FT_PER_M
# outside foundation perimeter
operim = float(args.pop(1))
# foundation exterior area
barea = float(args.pop(1))
if barea < 0:
print("Invalid foundation area ", barea)
sys.exit()
# top floor exterior area difference from barea
ta_delta = float(args.pop(1)) if len(args) > 1 else 0
# above foundation perimeter if different than fperim
afl_perim = float(args.pop(1)) if len(args) > 1 else operim
t = ET.parse(template)
# extract photos
ymd = photos.extract(fileid)
# sample appointment text:
# 123 Main St, Dartmouth, NS B1H 0H0 <NAME> 902-555-1212
info = input("client info: ")
(street, city, rest) = info.split(',')
# skip over prov if present - set in h2k template
if rest[0:3] == " NS":
rest = rest[3:]
(postal, name, tel) = rest[1:8], rest[9:-10],rest[-12:]
# copy stick-framed 2x6 house specs
house_data = "../../" + fileid + "/" + fileid + "-house-data.txt"
os.system("cp 2x6-house.txt " + house_data)
hd = open(house_data, 'a')
hd.write(info)
c = t.find("./ProgramInformation/Client")
c.find("Name/First").text = name.split(' ')[0]
c.find("Name/Last").text = name.split(' ')[1]
c.find("Telephone").text = tel
sa = c.find("StreetAddress")
sa.find("Street").text = street
sa.find("City").text = city
# province set in h2k template
sa.find("PostalCode").text = postal
t.find("ProgramInformation/File").attrib["evaluationDate"] = ymd
t.find("ProgramInformation/File/Identification").text = fileid
#t.find("./House/Specifications/FacingDirection").attrib["code"] = FacingDirection
#t.find("./House/Specifications/YearBuilt").attrib["value"] = YearBuilt
storeys = 2 if wall_height_m > 4 else 1
# code 1 = 1 storey, 3 = 2 storey
t.find("House/Specifications/Storeys").attrib["code"] = "1" if storeys == 1 else "3"
hd.write("\nstoreys: " + str(storeys))
# calculate foundation and main floor area converted to metric
main_area_sm=(barea - afl_perim/2 +1)/SF_PER_SM
mperim_in_m = (afl_perim -4)/FT_PER_M
bsmt_area_sm=(barea -operim +4)/SF_PER_SM
bperim_in_m = (operim -8)/FT_PER_M
# calculate sign since ta_delta can be negative
if ta_delta != 0:
ta_sign = math.sqrt(pow(ta_delta,2))/ta_delta
else:
ta_sign = 1
# ta_delta is exterior area so reduce by sqrt for rough interior area
ta_sqrt = math.sqrt(abs(ta_delta))
tad_in_sm = (ta_delta - ta_sqrt * ta_sign)/SF_PER_SM
hfa = t.find("House/Specifications/HeatedFloorArea")
above_grade_sm = (main_area_sm * storeys) + tad_in_sm
hfa.attrib["aboveGrade"] = str(above_grade_sm)
hfa.attrib["belowGrade"] = str(bsmt_area_sm)
hd.write("\nheated floor area sf above grade: " + str(round(above_grade_sm * SF_PER_SM))
+ " below grade: " + str(round(bsmt_area_sm * SF_PER_SM)))
# calculate volume 7.75ft bsmt + 1' header + 8ft main flr
volume = ((7.75 + 1)/FT_PER_M * bsmt_area_sm) + wall_height_m * main_area_sm
# adjust for different top floor area with 8' ceiling and 1' floor
volume += tad_in_sm * 9/FT_PER_M
t.find("House/NaturalAirInfiltration/Specifications/House").attrib["volume"] = str(volume)
hd.write("\nhouse volume cf: " + str(round(volume * SF_PER_SM * FT_PER_M)))
# calculate highest ceiling height
# template has 4' pony, so add 1' above grade + 1' header to wall height
# highest ceiling is best calculated manually
#highest_ceiling = (4+1+1)/FT_PER_M + wall_height_m
#t.find("House/NaturalAirInfiltration/Specifications/BuildingSite").attrib["highestCeiling"] =
# str(highest_ceiling)
hc = t.find("House/Components")
ef = hc.find("Floor")
if ta_delta > 0:
# configure exposed floor
efl_m = math.sqrt(tad_in_sm)
ef.find("Measurements").attrib["area"] = str(tad_in_sm)
ef.find("Measurements").attrib["length"] = str(efl_m)
hd.write("\nexposed floor area, length: "
+ str(round(tad_in_sm * SF_PER_SM)) + ", "
+ str(round(efl_m * FT_PER_M)))
else:
hc.remove(ef)
m = hc.find("Ceiling/Measurements")
# eave length
c_len_m = mperim_in_m/2
m.attrib["length"] = str(c_len_m)
ceiling_area_sm = main_area_sm if ta_delta < 0 else main_area_sm + tad_in_sm
m.attrib["area"] = str(ceiling_area_sm)
hd.write("\nceiling area, length: " +
str(round(ceiling_area_sm * SF_PER_SM)) +
", " + str(round(c_len_m * FT_PER_M )))
m = hc.find("Wall/Measurements")
m.attrib["height"] = str(wall_height_m)
m.attrib["perimeter"] = str(mperim_in_m)
hd.write("\nwall height, perim: " +
str(round(wall_height_m * FT_PER_M )) +
", " + str(mperim_in_m * FT_PER_M))
# calculate foundation perim & area
hc.find("Basement").attrib["exposedSurfacePerimeter"] = str(bperim_in_m)
m = hc.find("Basement/Floor/Measurements")
m.attrib["area"] = str(bsmt_area_sm)
# H2K errror if perim <= 4*sqrt(area), common ratio is 1.05x
# relevant for semis and multiple foundations
bfp_m = math.sqrt(bsmt_area_sm)*4 * 1.05
m.attrib["perimeter"] = str(bfp_m)
hd.write("\nfoundation floor area, perim, exp. surface perim: " +
str(round(bsmt_area_sm * SF_PER_SM )) +
", " + str(round(bfp_m * FT_PER_M)) +
", " + str(round(bperim_in_m * FT_PER_M)))
# debug
#t.write("out.h2k", "UTF-8", True)
#sys.exit(0)
# write prepared h2k file
outfile = "../../" + fileid + ".h2k"
t.write(outfile, "UTF-8", True)
#os.system("unix2dos " + outfile)
| 2.640625 | 3 |
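The core mechanic of ghwiz is editing an H2K template with xml.etree.ElementTree: find an element, set its .text or an attribute, then write the tree back out. A stripped-down, runnable sketch of that round trip; the XML below is a made-up stand-in, not the real H2K schema:

```python
import xml.etree.ElementTree as ET

template = """<HouseFile>
  <ProgramInformation>
    <Client><Name><First>?</First><Last>?</Last></Name></Client>
    <File evaluationDate="00000000"><Identification>?</Identification></File>
  </ProgramInformation>
</HouseFile>"""

t = ET.ElementTree(ET.fromstring(template))
client = t.find("ProgramInformation/Client")
client.find("Name/First").text = "Jane"
client.find("Name/Last").text = "Doe"
t.find("ProgramInformation/File").attrib["evaluationDate"] = "20210316"
t.find("ProgramInformation/File/Identification").text = "F12345"
t.write("out.h2k", "UTF-8", True)  # same write signature the script uses
```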
scripts/evaluate.py | rashikcs/Case-Study-Campaign-Optimization | 0 | 12794461 | import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from scripts.visualization import plot_roc_curve
from scripts.visualization import plot_precision_recall_curve
from scripts.visualization import plot_confusion_matrix
def to_labels(pos_probs, threshold):
return (pos_probs >= threshold).astype('int')
def get_cut_off_threshold_from_precision_recall(precision:list,
recall:list,
thresholds:list)->int:
try:
# convert to f score
fscore = (2 * precision * recall) / (precision + recall)
# locate the index of the largest f score
ix = np.argmax(fscore)
print('PR-curve threshold=%f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))
return ix
except Exception as error:
raise Exception('Caught this error: ' + repr(error))
def get_cut_off_threshold_through_iteration(pos_probs:list, y_test:list)->float:
"""
Extracts cut off thresholds by itrating all possible values up to 3 decimal places
from 0.0001-1. Returns the value maximizes macro f1 score.
"""
try:
# define thresholds
thresholds = np.arange(0, 1, 0.0001)
# evaluate each threshold
scores = [f1_score(y_test, to_labels(pos_probs, t), average='macro') for t in thresholds]
# get best threshold
ix = np.argmax(scores)
print('Threshold=%.3f, Best macro F1-Score=%.5f' % (thresholds[ix], scores[ix]))
return thresholds[ix]
except Exception as error:
raise Exception('Caught this error: ' + repr(error))
def get_evaluation_report(test_set:list,
prediction_proba:list,
labels:list,
threshold:float = None,
plot:str='precision-recall',
save_path:str = None)->dict:
"""
Args:
test_set:list -> original target values
prediction_proba:list -> extension to use for serializing
labels:list -> target label names
threshold:float -> Probability cut off threshold
plot:str -> roc or precision-recall
save_path:str -> save directory
"""
try:
auc_score = 0
if plot=='roc':
fpr, tpr, _ = roc_curve(test_set, prediction_proba)
auc_score = roc_auc_score(test_set, prediction_proba)
plot_roc_curve(auc_score, fpr, tpr)
elif plot=='precision-recall':
precision, recall, thresholds = precision_recall_curve(test_set, prediction_proba)
auc_score = auc(recall, precision)
no_skill = np.sum(test_set==1)/test_set.shape
ix = get_cut_off_threshold_from_precision_recall(precision, recall, thresholds)
best_threshold_pos = (recall[ix], precision[ix])
plot_precision_recall_curve(auc_score,
recall,
precision,
best_threshold_pos,
round(no_skill[0], 2),
save_path)
#threshold = round(thresholds[ix], 3) if not threshold else None
if not threshold:
threshold = get_cut_off_threshold_through_iteration(prediction_proba, test_set)
predictions = prediction_proba>threshold
cr = classification_report(test_set, predictions, target_names=labels)
cm = confusion_matrix(test_set, predictions)
mcc = matthews_corrcoef(test_set, predictions)
print('\n',cr)
print('Matthews correlation coefficient: ', mcc)
plot_confusion_matrix(cm,
labels,
save_path=save_path)
return {'threshold':threshold,
'auc':auc_score,
'mcc':mcc,
'confusion_matrix': cm,
'classification_report':classification_report(test_set,
predictions,
target_names=labels,
output_dict=True)}
except Exception as error:
raise Exception('Caught this error: ' + repr(error)) | 2.703125 | 3 |
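A sklearn detail that matters for get_cut_off_threshold_from_precision_recall: precision_recall_curve returns one more precision/recall point than thresholds, so the best-F1 index is safest taken over the sliced arrays before indexing thresholds. A self-contained sketch on synthetic scores (the data is random, purely illustrative):

```python
import numpy as np
from sklearn.metrics import precision_recall_curve

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=1000)
# scores loosely correlated with the labels
y_score = np.clip(0.4 * y_true + rng.normal(0.3, 0.25, size=1000), 0, 1)

precision, recall, thresholds = precision_recall_curve(y_true, y_score)
# drop the final (precision=1, recall=0) point, which has no threshold
fscore = 2 * precision[:-1] * recall[:-1] / (precision[:-1] + recall[:-1] + 1e-12)
ix = int(np.argmax(fscore))
print('PR-curve threshold=%.3f, F-Score=%.3f' % (thresholds[ix], fscore[ix]))
labels = (y_score >= thresholds[ix]).astype('int')  # same rule as to_labels()
```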
dssg_challenge/ga/algorithm/simulated_annealing.py | DavidSilva98/dssgsummit2020-challenge | 0 | 12794462 | <reponame>DavidSilva98/dssgsummit2020-challenge
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------------------------
"""
Simulated Annealing Meta-Heuristic
----------------------------------
Content:
▶ class Simulated Annealing
─────────────────────────────────────────────────────────────────────────
CIFO - Computation Intelligence for Optimization
Author: <NAME> - <EMAIL> - (2019) version L4.0
"""
# -------------------------------------------------------------------------------------------------
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
# C O D E
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
# -------------------------------------------------------------------------------------------------
# Class: Simulated Annealing
# -------------------------------------------------------------------------------------------------
class SimulatedAnnealing:
"""
Classic Implementation of Simulated Annealing with some improvements.
Improvements:
------------
1. Memory - avoiding losing the best
2. C / Minimum C Calibration
Algorithm:
---------
1: Initialize
2: Repeat while Control Parameter >= Minimum Control Parameter
2.1. Internal Looping
2.1.1: Get the best neighbor
2.1.2: Select the best, between the current best and current best neighbor
2.1.3: Check stop condition ***
2.2. Update C (Control Parameter)
2.3: Check stop condition ***
3: Return the Solution
"""
# Constructor
#----------------------------------------------------------------------------------------------
def __init__(self, problem_instance, neighborhood_function, feedback = None, config = {}):
"""
Simulated Annealing Constructor
parameters:
-----------
* neighborhood_function - a function expected to follow the signature:
<<neighborhood_function>>( solution, problem, neighborhood_size = 0 )
where:
- <neighborhood_function> is the name of the neighborhood function implemented for the problem_instance
"""
self._problem_instance = problem_instance
self._get_neighbors = neighborhood_function
self._feedback = feedback
# memory (to avoid lost the best)
self._best_solution = None
# Search
#----------------------------------------------------------------------------------------------
def search(self):
"""
Simulated Annealing Search Method
----------------------------------
Algorithm:
1: Initialize
2: Repeat while Control Parameter >= Minimum Control Parameter
2.1. Internal Looping
2.1.1: Get the best neighbor
2.1.2: Select the best, between the current best and current best neighbor
2.1.3: Check stop condition ***
2.2. Update C (Control Parameter)
2.3: Check stop condition ***
3: Return the Solution
"""
pass
# Constructor
#----------------------------------------------------------------------------------------------
def _initialize(self):
"""
Initialize the initial solution, start C and Minimum C
"""
pass
# Constructor
#----------------------------------------------------------------------------------------------
def _select(self):
"""
Select the solution for the next iteration
"""
pass
# Constructor
#----------------------------------------------------------------------------------------------
def _get_random_neighbor(self, solution):
"""
Get a random neighbor of the neighborhood (internally it will call the neighborhood provided to the algorithm)
"""
pass
# Constructor
#----------------------------------------------------------------------------------------------
def _initialize_C(self):
"""
Use one of the available approaches to initialize C and Minimum C
"""
pass
# -------------------------------------------------------------------------------------------------
# Class: Simulated Annealing
# -------------------------------------------------------------------------------------------------
def initialize_C_approach1():
return 0, 0 | 2.0625 | 2 |
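The SimulatedAnnealing class above is a skeleton: every method body is pass. A compact, generic sketch of the loop its docstring describes (memory of the best solution, an inner loop per temperature, geometric cooling of C); this is not the CIFO implementation and all parameter values are illustrative:

```python
import math
import random


def simulated_annealing(initial, neighbor_fn, fitness_fn,
                        c=1.0, min_c=0.01, alpha=0.95, inner_loops=50):
    """Minimise fitness_fn, keeping the best solution ever seen."""
    current = best = initial
    while c >= min_c:
        for _ in range(inner_loops):
            candidate = neighbor_fn(current)
            delta = fitness_fn(candidate) - fitness_fn(current)
            # always accept improvements; accept worse moves with probability exp(-delta/C)
            if delta <= 0 or random.random() < math.exp(-delta / c):
                current = candidate
            if fitness_fn(current) < fitness_fn(best):
                best = current          # memory: never lose the best
        c *= alpha                      # 2.2 update the control parameter
    return best


# toy usage: minimise f(x) = (x - 3)^2
best_x = simulated_annealing(
    initial=0.0,
    neighbor_fn=lambda x: x + random.uniform(-1, 1),
    fitness_fn=lambda x: (x - 3) ** 2,
)
print(round(best_x, 2))
```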
gongda/TROPOMI/TROPOMI_NO2_Suez_visualisations.py | eamarais/eam-group | 3 | 12794463 | #####################################################################################################################
#####################################################################################################################
# See how TROPOMI NO2 responds to the Suez Canal blockage
# When downloading the data, look at a larger domain (Suez and its surroundings + the Mediterranean Sea)
import os
import glob
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import xarray as xr
'''
Note on this Suez Canal blockage
Blockage period: 23-29 March 2021
Data download period: 5 January - 26 April 2021
Domain (lon_min,lat_min,lon_max,lat_max): -20,5,60,50
Corresponding hour windows for data donwload: [6,7,8,9,10,11,12,13,14]
First test: sample weekly data before, during and after the blockage, get maps and a time series plot
Second test: get daily maps and combine with GeoViews
'''
#####################################################################################################################
# build a function to read oversampled TROPOMI NO2 as pandas dataframes
def read_oversampled_NO2(TROPOMI_oversampled_NO2_output_file):
'''read the output file for oversampled TROPOMI NO2'''
df = pd.read_csv(TROPOMI_oversampled_NO2_output_file,sep="\s+",header=None)
df = df.iloc[:,2:7]
df.columns = ['lat','lon','NO2','Count','NO2_uncertainty']
return df
#####################################################################################################################
# the spatial coverage may not be consistent on different days or during different weeks
# read all the data from the weekly results
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
Oversampled_NO2_files = sorted(glob.glob("Oversample_output_Suez_NO2_week*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files,sep="\n")
oversampled_data = [read_oversampled_NO2(file) for file in Oversampled_NO2_files]
# use all the data ever sampled to decide the max dimension
lat_min = []
lat_max = []
lon_min = []
lon_max = []
for i in range(len(oversampled_data)):
lat_min.append(oversampled_data[i].lat.min())
lat_max.append(oversampled_data[i].lat.max())
lon_min.append(oversampled_data[i].lon.min())
lon_max.append(oversampled_data[i].lon.max())
lat_min = min(lat_min)
lat_max = max(lat_max)
lon_min = min(lon_min)
lon_max = max(lon_max)
# check the full dimension
print("lat_min:",lat_min)
print("lat_max:",lat_max)
print("lon_min:",lon_min)
print("lon_max:",lon_max)
# With the dimension above and the resolution, we can create a consistent domain ("the full grid")
# so that we can combine the data from different days/weeks together
# first list all the lats and lons: use (min,max+1/2 resolutions, resolution) to keep the max value in Python
# just round the floats created by Python to be safe
# as the "pd.merge" step later will require the values of "keys" to be excatly the same
Res = 0.05
domain_lat = np.arange(lat_min,lat_max+Res/2,Res,dtype=None)
domain_lon = np.arange(lon_min,lon_max+Res/2,Res,dtype=None)
domain_lat = np.round(domain_lat,3)
domain_lon = np.round(domain_lon,3)
# build a function to create a "full grid" by listing the full combinations of lats and lons in the domain
def expand_grid(lat,lon):
'''list all combinations of lats and lons using expand_grid(lat,lon)'''
test = [(A,B) for A in lat for B in lon]
test = np.array(test)
test_lat = test[:,0]
test_lon = test[:,1]
full_grid = pd.DataFrame({'lat': test_lat, 'lon': test_lon})
return full_grid
# create the "full grid"
domain_grid = expand_grid(domain_lat,domain_lon)
print(domain_grid)
################################################################################################
# Now we can read each single dataset and match it with the full grid
# Step 1> select the oversampled data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# change input time to read daily data or weekly data
time = 'week_1'
Oversampled_NO2_file = "Oversample_output_Suez_NO2_"+str(time)+"_0.05"
# check the selected data
print(Oversampled_NO2_file)
# Step 2> feed the oversampled data into this data cleaning routine
# read oversampled NO2 data
NO2_data = read_oversampled_NO2(Oversampled_NO2_file)
# combine the data with the full domain grids
NO2_data = pd.merge(domain_grid,NO2_data,how='left', on=['lat','lon'])
NO2_data = NO2_data.sort_values(by=['lat','lon'], ascending=[True, True])
# reshape the variables from 1D in the dataframe to the map dimension
NO2 = NO2_data['NO2'].values.reshape(len(domain_lat),len(domain_lon))
NO2_uncertainty = NO2_data['NO2_uncertainty'].values.reshape(len(domain_lat),len(domain_lon))
Count = NO2_data['Count'].values.reshape(len(domain_lat),len(domain_lon))
# convert to xarray for plotting
NO2_xarray = xr.DataArray(NO2, coords=[('lat', domain_lat),('lon', domain_lon)])
NO2_uncertainty_xarray = xr.DataArray(NO2_uncertainty, coords=[('lat', domain_lat),('lon', domain_lon)])
Count_xarray = xr.DataArray(Count, coords=[('lat', domain_lat),('lon', domain_lon)])
# but it is complicated to save out the results one by one for multiple days or weeks
################################################################################################
################################################################################################
# So here we use the list comprehensions to process multiple files
#################
# weekly data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# select the files and sort them numerically
Oversampled_NO2_files_weekly = sorted(glob.glob("Oversample_output_Suez_NO2_week*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files_weekly,sep="\n")
# read oversampled data and match with the "full grid"
Oversampled_NO2_week = [read_oversampled_NO2(file) for file in Oversampled_NO2_files_weekly]
Oversampled_NO2_week = [pd.merge(domain_grid,data,how='left', on=['lat','lon']) for data in Oversampled_NO2_week]
Oversampled_NO2_week = [data.sort_values(by=['lat','lon'], ascending=[True, True]) for data in Oversampled_NO2_week]
# convert the data to the xarray format for plotting
NO2_week = [data['NO2'].values.reshape(len(domain_lat),len(domain_lon)) for data in Oversampled_NO2_week]
NO2_week_xr = [xr.DataArray(data, coords=[('lat', domain_lat),('lon', domain_lon)]) for data in NO2_week]
#################
# daily data
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Oversample_output')
# select the files and sort them numerically
Oversampled_NO2_files_daily = sorted(glob.glob("Oversample_output_Suez_NO2_day*"), key=lambda x: int(x.split("_")[-2]))
print(*Oversampled_NO2_files_daily,sep="\n")
# read oversampled data and match with the "full grid"
Oversampled_NO2_day = [read_oversampled_NO2(file) for file in Oversampled_NO2_files_daily]
Oversampled_NO2_day = [pd.merge(domain_grid,data,how='left', on=['lat','lon']) for data in Oversampled_NO2_day]
Oversampled_NO2_day = [data.sort_values(by=['lat','lon'], ascending=[True, True]) for data in Oversampled_NO2_day]
# convert the data to the xarray format for plotting
NO2_day = [data['NO2'].values.reshape(len(domain_lat),len(domain_lon)) for data in Oversampled_NO2_day]
NO2_day_xr = [xr.DataArray(data, coords=[('lat', domain_lat),('lon', domain_lon)]) for data in NO2_day]
################################################################################################
# Start making maps to have a quick look at the results
# avoid setting "%matplotlib inline" as it is time consuming when we need to produce many figures
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import geopandas as gpd
# read shape file (Global high resolution shoreline database from NOAA: https://www.ngdc.noaa.gov/mgg/shorelines/)
# use "full reolution" here to avoid misrepresentation of land and water
os.chdir("/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/shapefiles/gshhg-shp-2.3.7/GSHHS_shp/f")
world_shore = gpd.read_file("GSHHS_f_L1.shp")
################################################################################################
# build a function to quickly generate maps without a legend to save space on a slide
def quick_plot(input_xr,plot_domain,var_min,var_max,output_figure_name):
'''
Input a xarray data array, define the map domain, provide the min and max of the values on map. Provide a outputfile name.
'''
# set the figure size, the aspect ratio is set to be 2:1 due to the sampling region
fig = plt.figure(figsize=[20,10])
# set the map projection and domain: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html#cartopy-projection
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(plot_domain)
# plot the value on map
im = input_xr.plot(ax=ax,cmap='jet',vmin=var_min,vmax=var_max)
# add shapefile
ax.add_geometries(world_shore.geometry, crs=ccrs.PlateCarree(),edgecolor='black',facecolor='none')
# remove the colorbar and tile
plt.delaxes(fig.axes[1])
ax.set_title('')
# save out
fig.savefig(output_figure_name, dpi=100,bbox_inches='tight')
# close the figure to avoid taking CPU memory
plt.close()
################################################################################################
# build a function to generatet the bar for the figures above
def plot_color_bar(input_xr,plot_domain,label,var_min,var_max,output_figure_name):
'''
Draw the figure in the same way as above, but remove the plot rather than the colorbar.
'''
fig = plt.figure(figsize=[20,10])
cbar_keys = {'shrink': 1, 'pad' : 0.05,'orientation':'horizontal','label':label}
# set the map projection: https://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html#cartopy-projection
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(plot_domain)
# plotthe value on map
im = input_xr.plot(ax=ax,cmap='jet',cbar_kwargs=cbar_keys,vmin=var_min,vmax=var_max)
# set color bar label size
plt.rcParams.update({'font.size':25})
ax.xaxis.label.set_size(25)
# remove the plot
plt.delaxes(fig.axes[0])
# save out
fig.savefig(output_figure_name, dpi=100,bbox_inches='tight')
# close the figure to avoid taking CPU memory
plt.close()
################################################################################################
# check again the data for plotting
print("weekly data:",len(NO2_week_xr))
print("daily data:",len(NO2_day_xr))
# generate corresponding output file names
# weekly maps
Suez_weeks = list(range(1,17))
Suez_weeks = [str('Suez_NO2_map_week_') + str(week_number) for week_number in Suez_weeks]
print(*Suez_weeks,sep="\n")
# daily maps
Suez_days = list(range(1,29))
Suez_days = [str('Suez_NO2_map_day_') + str(date_number) for date_number in Suez_days]
print(*Suez_days,sep="\n")
################################################################################################
# output multiple plots together
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# maps during the blockage
# week 12
# day 8-14
# plot weekly data
# plot over the big domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_big = [-20,60,5,50]
for i in range(len(NO2_week_xr)):
quick_plot(NO2_week_xr[i],Suez_domain_big,0,2,Suez_weeks[i]+str('_big'))
# plot over the small domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_small = [26,60,10,35]
for i in range(len(NO2_week_xr)):
quick_plot(NO2_week_xr[i],Suez_domain_small,0,2,Suez_weeks[i]+str('_small'))
# generate the color bar at the end
plot_color_bar(NO2_week_xr[0],Suez_domain_small,'NO$_2$ tropospheric column [$10^{15}$ molec. cm$^{-2}$]',0,2,"Suez_NO2_color_bar")
# plot daily data
# plot over the small domain [lon_min,lon_max,lat_min,lat_max]
Suez_domain_small = [26,60,10,35]
for i in range(len(NO2_day_xr)):
quick_plot(NO2_day_xr[i],Suez_domain_small,0,2,Suez_days[i]+str('_small'))
################################################################################################
################################################################################################
# Use GeoViews to combine the maps together in time series
# load GeoViews package
import geoviews as gv
import geoviews.feature as gf
import cartopy.crs as crs
# it is important to check your geoviews version, some commands may not work in a wrong version
# this script is written under version 1.9.1
print(gv.__version__)
# there are two backends ('bokeh', 'matplotlib') for the GeoViews
# later we will use "bokeh" for interactive plots
################################################################################################
# weekly maps
# list all the weeks
Suez_weeks = ['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16']
print(*Suez_weeks,sep="\n")
# combine the xarray data arrays from weekly results
# make a copy first
weekly_data = NO2_week_xr.copy()
# add the variable name
weekly_data = [data.rename('NO2') for data in weekly_data]
# add a time dimension to the data
for i in range(len(NO2_week_xr)):
NO2_week_xr[i] = NO2_week_xr[i].assign_coords(week=Suez_weeks[i])
NO2_week_xr[i] = NO2_week_xr[i].expand_dims('week')
# combine the data together
NO2_week_xr_combined = xr.concat(NO2_week_xr,'week')
# you can zoom in and change maps, so normally there is no need to make a small map
# but if you have to reduce the file size, you can subset over the small domain
# weekly_data = [data.sel(lat=slice(10,35),lon = slice(26,60)) for data in weekly_data]
# check the results
NO2_week_xr_combined
# output the plots
# first move to the output directory
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# turn on "bokeh" backend to enable interactive map
gv.extension('bokeh')
# extract data from the combined xarray
gv_data = gv.Dataset(NO2_week_xr_combined,['lon','lat','week'],'NO2',crs=crs.PlateCarree())
# use the data to generate the geoviews image
gv_image = gv_data.to(gv.Image)
# decide features of the output figure
gv_image_out = gv_image.opts(cmap='jet', clim=(0,2), colorbar=True, width=800, height=500) * gf.coastline
# save out the interactive map
renderer = gv.renderer('bokeh')
renderer.save(gv_image_out, 'weekly_maps')
################################################################################################
# daily maps
# list all the dates
def list_dates_between(start_date,end_date):
'''List all dates between the start date ('yyyymmdd') and end date ('yyyymmdd'), inclusive'''
# list all the dates between the start and the end
from datetime import date, timedelta
start_date = date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
end_date = date(int(end_date[0:4]),int(end_date[4:6]),int(end_date[6:8]))
delta = end_date - start_date
sampling_dates = []
for i in range(delta.days + 1):
sampling_dates.append((start_date + timedelta(days=i)).strftime('%Y%m%d'))
# print out all the sampling dates
return sampling_dates
# list all the dates
Suez_days = list_dates_between("20210316","20210412")
print("number of days:",len(Suez_days))
print(*Suez_days,sep="\n")
# combine the xarray data arrays from daily results
# make a copy first
daily_data = NO2_day_xr.copy()
# add the variable name
daily_data = [data.rename('NO2') for data in daily_data]
# add a time dimension to the data
for i in range(len(NO2_day_xr)):
NO2_day_xr[i] = NO2_day_xr[i].assign_coords(date=Suez_days[i])
NO2_day_xr[i] = NO2_day_xr[i].expand_dims('date')
# combine the data together
NO2_day_xr_combined = xr.concat(NO2_day_xr,'date')
# check the results
NO2_day_xr_combined
# output the plots
# first move to the output directory
os.chdir('/rds/projects/2018/maraisea-glu-01/Study/Research_Data/TROPOMI/project_2_Suez_Canal/Figures')
# turn on "bokeh" backend to enable interactive map
gv.extension('bokeh')
# extract data from the combined xarray
gv_data = gv.Dataset(NO2_day_xr_combined,['lon','lat','date'],'NO2',crs=crs.PlateCarree())
# use the data to generate the geoviews image
gv_image = gv_data.to(gv.Image)
# decide features of the output figure
gv_image_out = gv_image.opts(cmap='jet', clim=(0,2), colorbar=True, width=800, height=500) * gf.coastline
# save out the interactive map
renderer = gv.renderer('bokeh')
renderer.save(gv_image_out, 'daily_maps')
# For now, the default coastline from GeoViews is used
# If you can crop and create your own shapefile, you should be able to use high resolution shorelines from NOAA
# Think about how to do this with geopandas
#####################################################################################################################
##################################################################################################################### | 2.640625 | 3 |
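The key regridding trick in the script above is expand_grid plus a left merge: build every lat/lon combination of the domain, merge the sparse oversampled values onto it so missing cells become NaN, then reshape to the map dimensions. A tiny self-contained version on a 5 x 5 toy domain with synthetic NO2 values:

```python
import numpy as np
import pandas as pd

res = 0.05
domain_lat = np.round(np.arange(10.0, 10.2 + res / 2, res), 3)
domain_lon = np.round(np.arange(30.0, 30.2 + res / 2, res), 3)

# "full grid": every combination of lats and lons, as in expand_grid()
full_grid = pd.DataFrame(
    [(lat, lon) for lat in domain_lat for lon in domain_lon],
    columns=['lat', 'lon'],
)

# sparse "oversampled" data covering only two cells of the domain
sparse = full_grid.iloc[[0, 7]].copy()
sparse['NO2'] = [1.2, 0.8]

# left merge keeps every grid cell; unsampled cells become NaN
merged = pd.merge(full_grid, sparse, how='left', on=['lat', 'lon'])
merged = merged.sort_values(by=['lat', 'lon'], ascending=[True, True])
no2_map = merged['NO2'].values.reshape(len(domain_lat), len(domain_lon))
print(no2_map)
```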
problem/01000~09999/04999/4999.py3.py | njw1204/BOJ-AC | 1 | 12794464 | print('gn'[input()>input()]+'o') | 2.125 | 2 |
AutomatedTesting/Assets/TestAnim/scene_export_motion.py | BreakerOfThings/o3de | 11 | 12794465 | <gh_stars>10-100
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import traceback, sys, uuid, os, json
import scene_export_utils
import scene_api.motion_group
#
# Example for exporting MotionGroup scene rules
#
def update_manifest(scene):
import azlmbr.scene.graph
import scene_api.scene_data
# create a SceneManifest
sceneManifest = scene_api.scene_data.SceneManifest()
# create a MotionGroup
motionGroup = scene_api.motion_group.MotionGroup()
motionGroup.name = os.path.basename(scene.sourceFilename.replace('.', '_'))
motionAdditiveRule = scene_api.motion_group.MotionAdditiveRule()
motionAdditiveRule.sampleFrame = 2
motionGroup.add_rule(motionAdditiveRule)
motionScaleRule = motionGroup.create_rule(scene_api.motion_group.MotionScaleRule())
motionScaleRule.scaleFactor = 1.1
motionGroup.add_rule(motionScaleRule)
# add motion group to scene manifest
sceneManifest.add_motion_group(motionGroup)
# Convert the manifest to a JSON string and return it
return sceneManifest.export()
sceneJobHandler = None
def on_update_manifest(args):
try:
scene = args[0]
return update_manifest(scene)
except RuntimeError as err:
print (f'ERROR - {err}')
scene_export_utils.log_exception_traceback()
except:
scene_export_utils.log_exception_traceback()
global sceneJobHandler
sceneJobHandler.disconnect()
sceneJobHandler = None
# try to create SceneAPI handler for processing
try:
import azlmbr.scene
sceneJobHandler = azlmbr.scene.ScriptBuildingNotificationBusHandler()
sceneJobHandler.connect()
sceneJobHandler.add_callback('OnUpdateManifest', on_update_manifest)
except:
sceneJobHandler = None
| 1.820313 | 2 |
ml_cdn_cifar.py | wiseodd/compound-density-networks | 14 | 12794466 | import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributions as dists
import numpy as np
import scipy.io
import foolbox
import input_data
import argparse
from tqdm import tqdm
import data_loader
import math
import os
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import CallableModelWrapper
from cleverhans.utils import AccuracyReport
from cleverhans.utils_pytorch import convert_pytorch_model_to_tf
parser = argparse.ArgumentParser()
parser.add_argument('--use_dropout', default=False, action='store_true')
parser.add_argument('--normalize', default=False, action='store_true')
parser.add_argument('--load', default=False, action='store_true')
parser.add_argument('--train_samples', type=int, default=1)
parser.add_argument('--n_samples', type=int, default=100)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--wd', type=float, default=0)
parser.add_argument('--lam', type=float, default=1e-7)
parser.add_argument('--n_hidden', type=int, default=100)
parser.add_argument('--n_hidden_hypernet', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--n_iter', type=int, default=100)
parser.add_argument('--randseed', type=int, default=9999)
args = parser.parse_args()
np.random.seed(args.randseed)
torch.manual_seed(args.randseed)
name = 'mlcdn'
if args.use_dropout:
name = 'dropout'
os.makedirs('./results/cifar', exist_ok=True)
os.makedirs('./models/cifar', exist_ok=True)
# Load training data
trainset, testset = data_loader.load_dataset('cifar10_pretrained')
class ProbHypernet(nn.Module):
def __init__(self, in_dim, out_dim, h_dim=100):
super(ProbHypernet, self).__init__()
self.in_dim = in_dim + 1
self.out_dim = out_dim
self.h_dim = h_dim
self.M = nn.Parameter(torch.randn(self.in_dim, out_dim))
self.fc_xh = nn.Linear(in_dim, h_dim)
nn.init.uniform_(self.fc_xh.weight, -0.0001, 0.0001)
self.fc_hmu = nn.Linear(h_dim, self.in_dim)
nn.init.uniform_(self.fc_hmu.weight, -0.0001, 0.0001)
self.fc_hlogvar_in = nn.Linear(h_dim, self.in_dim)
nn.init.uniform_(self.fc_hlogvar_in.weight, -0.0001, 0.0001)
self.fc_hlogvar_out = nn.Linear(h_dim, out_dim)
nn.init.uniform_(self.fc_hlogvar_out.weight, -0.0001, 0.0001)
def forward(self, x, output_weight_params=False):
m = x.shape[0]
r, c = self.in_dim, self.out_dim
h = self.fc_xh(x)
h = F.relu(h)
mu_scaling = self.fc_hmu(h)
logvar_r = self.fc_hlogvar_in(h)
logvar_c = self.fc_hlogvar_out(h)
M = self.M
M = mu_scaling.view(m, r, 1) * M # Broadcasted: M is (m, r, c)
var_r = torch.exp(logvar_r)
var_c = torch.exp(logvar_c)
E = torch.randn(m, r, c, device='cuda')
# Reparametrization trick
W = M + torch.sqrt(var_r).view(m, r, 1) * E * torch.sqrt(var_c).view(m, 1, c)
# KL divergence to prior MVN(0, I, I)
D_KL = torch.mean(
1/2 * (torch.sum(var_r, 1)*torch.sum(var_c, 1) \
+ torch.norm(M.view(m, -1), dim=1)**2 \
- r*c - c*torch.sum(logvar_r, 1) - r*torch.sum(logvar_c, 1))
)
x = torch.cat([x, torch.ones(m, 1, device='cuda')], 1)
h = torch.bmm(x.unsqueeze(1), W).squeeze()
if output_weight_params:
return h, D_KL, (M, var_r, var_c)
else:
return h, D_KL
class Model(nn.Module):
def __init__(self, h_dim=100, h_dim_hypernet=50, use_dropout=False):
super(Model, self).__init__()
self.use_dropout = use_dropout
if not self.use_dropout:
self.fc_xh = ProbHypernet(1024, h_dim, h_dim_hypernet)
self.fc_hy = ProbHypernet(h_dim, 10, h_dim_hypernet)
else:
self.fc_xh = nn.Linear(1024, h_dim)
self.fc_hy = nn.Linear(h_dim, 10)
def forward(self, X):
X = X.squeeze()
if not self.use_dropout:
h, D_KL1 = self.fc_xh(X)
h = F.relu(h)
y, D_KL2 = self.fc_hy(h)
return (y, D_KL1+D_KL2) if self.training else y
else:
h = F.relu(self.fc_xh(X))
if self.use_dropout:
h = F.dropout(h, p=0.5, training=True)
y = self.fc_hy(h)
return y
def validate(m=args.batch_size):
model.eval()
val_acc = 0
total = 0
for x, y in testset:
x = x.cuda()
y_i = model.forward(x)
val_acc += np.sum(y_i.argmax(dim=1).cpu().numpy() == y.numpy())
total += x.shape[0]
model.train()
return val_acc/total
""" Training """
S = args.train_samples
m = args.batch_size
lr = args.lr
lam = args.lam
h_dim = args.n_hidden
h_dim_hypernet = args.n_hidden_hypernet
model = Model(h_dim, h_dim_hypernet, args.use_dropout).cuda()
print(f'Parameter count: {np.sum([value.numel() for value in model.parameters()])}')
if args.load:
model.load_state_dict(torch.load(f'models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin'))
else:
opt = optim.Adam(model.parameters(), lr, weight_decay=args.wd)
pbar = tqdm(range(args.n_iter))
for i in pbar:
for x, y in trainset:
x = x.cuda()
y = y.cuda()
if not args.use_dropout:
log_p_y = []
D_KL = 0
for _ in range(S):
y_s, D_KL = model.forward(x)
log_p_y_s = dists.Categorical(logits=y_s).log_prob(y)
log_p_y.append(log_p_y_s)
loss = -torch.mean(torch.logsumexp(torch.stack(log_p_y), 0) - math.log(S))
loss += args.lam*D_KL
else:
out = model.forward(x)
loss = F.cross_entropy(out, y)
loss.backward()
nn.utils.clip_grad_value_(model.parameters(), 5)
opt.step()
opt.zero_grad()
val_acc = validate(m)
pbar.set_description(f'[Loss: {loss.data.item():.3f}; val acc: {val_acc:.3f}]')
# Save model
if not args.load:
torch.save(model.state_dict(), f'models/cifar/model_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.bin')
""" =============================== Validate ======================================= """
def test():
model.eval()
y = []
t = []
for x_test, y_test in testset:
x_test = x_test.cuda()
y_i = model.forward(x_test)
y.append(F.softmax(y_i, dim=1).cpu().data.numpy())
t.append(y_test)
y = np.concatenate(y, 0)
t = np.concatenate(t)
return y, t
y_val = 0
for _ in tqdm(range(args.n_samples)):
y_s, t = test()
y_val += 1/args.n_samples*y_s
# Print accuracy
acc = np.mean(y_val.argmax(1) == t)
print(f'Test accuracy on CIFAR-10: {acc:.3f}')
""" ======================= Adversarial examples experiments ======================= """
model.eval()
input_shape = (None, 3, 32, 32)
trainset, testset = data_loader.load_dataset('cifar10')
pretrained_model = torchvision.models.densenet121(pretrained=True).cuda()
pretrained_model = torch.nn.Sequential(*(list(pretrained_model.children())[:-1]))
pretrained_model.eval()
model = nn.Sequential(pretrained_model, model)
model.eval()
# We use tf for evaluation on adversarial data
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
x_op = tf.placeholder(tf.float32, shape=input_shape)
# Convert pytorch model to a tf_model and wrap it in cleverhans
tf_model_fn = convert_pytorch_model_to_tf(model, out_dims=10)
cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')
adv_accs = []
adv_ents = []
def test_tf(use_adv=True):
preds = []
y_test = []
total = 0
for x, y in testset:
x = x.permute(0, 3, 1, 2)
if use_adv:
pred = sess.run(adv_preds_op, feed_dict={x_op: x})
pred = F.softmax(torch.from_numpy(pred), 1).numpy()
else:
pred = model.forward(x.cuda())
pred = F.softmax(pred, 1).cpu().data.numpy()
preds.append(pred)
y_test.append(y)
total += x.shape[0]
if total >= 1000:
break
preds = np.concatenate(preds, 0)
y_test = np.concatenate(y_test, 0)
return np.nan_to_num(preds), y_test
adv_preds = 0
for _ in tqdm(range(args.n_samples)):
preds, y_test = test_tf(False)
adv_preds += 1/args.n_samples * preds
# Compute acc and entropy
acc = (np.argmax(adv_preds, axis=1) == y_test).mean()
ent = (-adv_preds*np.log(adv_preds+1e-8)).sum(1).mean()
adv_accs.append(acc)
adv_ents.append(ent)
print('Adv accuracy: {:.3f}'.format(acc))
print('Avg entropy: {:.3f}'.format(ent))
for eps in np.arange(0.1, 1.01, 0.1):
# Create an FGSM attack
fgsm_op = FastGradientMethod(cleverhans_model, sess=sess)
fgsm_params = {'eps': eps,
'clip_min': 0.,
'clip_max': 1.}
adv_x_op = fgsm_op.generate(x_op, **fgsm_params)
adv_preds_op = tf_model_fn(adv_x_op)
# Run an evaluation of our model against fgsm
# Use M data
adv_preds = 0
for _ in tqdm(range(args.n_samples)):
preds, y_test = test_tf()
adv_preds += 1/args.n_samples * preds
# Compute acc and entropy
acc = (np.argmax(adv_preds, axis=1) == y_test).mean()
ent = (-adv_preds*np.log(adv_preds+1e-8)).sum(1).mean()
adv_accs.append(acc)
adv_ents.append(ent)
print('Adv accuracy: {:.3f}'.format(acc))
print('Avg entropy: {:.3f}'.format(ent))
sess.close()
# Save data
np.save(f'results/cifar/accs_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy', adv_accs)
np.save(f'results/cifar/ents_adv_{name}_{h_dim}_{h_dim_hypernet}_{m}_{lr}_{args.wd}_{args.lam}_{S}.npy', adv_ents)
| 1.945313 | 2 |
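The heart of ProbHypernet.forward is the reparametrization trick for a per-example weight matrix with separable row and column variances. The shapes are easier to check in isolation; a toy-dimension sketch of just that sampling step (all sizes are illustrative):

```python
import torch

torch.manual_seed(0)

m, r, c = 4, 6, 3                      # batch size, input dim + bias, output dim
M = torch.randn(m, r, c)               # per-example weight means
var_r = torch.exp(torch.randn(m, r))   # row variances
var_c = torch.exp(torch.randn(m, c))   # column variances

# Reparametrization: W = M + sqrt(var_r) * E * sqrt(var_c), broadcast over rows/columns
E = torch.randn(m, r, c)
W = M + torch.sqrt(var_r).view(m, r, 1) * E * torch.sqrt(var_c).view(m, 1, c)

x = torch.randn(m, r)                  # inputs with the bias column already appended
h = torch.bmm(x.unsqueeze(1), W).squeeze(1)
print(W.shape, h.shape)                # torch.Size([4, 6, 3]) torch.Size([4, 3])
```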
app/migrations/0003_photo_pic.py | taojy123/Babylon | 0 | 12794467 | <gh_stars>0
# Generated by Django 3.0.8 on 2020-08-04 03:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_photo_hidden'),
]
operations = [
migrations.AddField(
model_name='photo',
name='pic',
field=models.FileField(blank=True, null=True, upload_to='babylon'),
),
]
| 1.53125 | 2 |
backend/registry/enums/document.py | Don-King-Kong/mrmap | 10 | 12794468 | from MrMap.enums import EnumChoice
class DocumentEnum(EnumChoice):
""" Defines all document types
"""
CAPABILITY = "Capability"
METADATA = "Metadata"
| 2.15625 | 2 |
python/qibuild/actions/list_configs.py | vbarbaresi/qibuild | 0 | 12794469 | <reponame>vbarbaresi/qibuild
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
"""List all the known configs """
import operator
from qisys import ui
import qisys.parsers
import qibuild.worktree
def configure_parser(parser):
qisys.parsers.worktree_parser(parser)
def do(args):
worktree = qisys.parsers.get_worktree(args, raises=False)
qibuild_cfg = qibuild.config.QiBuildConfig()
qibuild_cfg.read()
configs = qibuild_cfg.configs.values()
configs.sort(key=operator.attrgetter("name"))
ui.info("Known configs")
for config in configs:
ui.info("*", config)
default_config = None
if worktree:
build_worktree = qibuild.worktree.BuildWorkTree(worktree)
default_config = build_worktree.default_config
if default_config:
ui.info("Worktree in", build_worktree.root,
"is using", default_config, "as a default config")
| 1.875 | 2 |
graph_2_systems_rmsd.py | Miro-Astore/mdanalysis_scripts | 1 | 12794470 | <reponame>Miro-Astore/mdanalysis_scripts<filename>graph_2_systems_rmsd.py
import numpy as np
import os
import MDAnalysis as mda
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
system_list=['_scratch_r16_ma2374_gmx_cftr_2nd_round_310K_I37R_3_res_rmsd.dat','_scratch_r16_ma2374_gmx_cftr_2nd_round_310K_wt_1_res_rmsd.dat']
colors=['orange','tab:blue']
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
#cwd=os.getcwd()
#system_list=np.loadtxt('system_list',dtype=str)
#num_systems=len(system_list)
plt.figure(figsize=(10,5))
numblocks=1
sys_count=1
gridspec.GridSpec(2,1)
plt.subplot2grid((2,1),(0,0),colspan=1,rowspan=1)
count=0
for i in system_list:
sys_name=i.split('_')[-4].upper()
print (sys_name)
data = np.loadtxt(str(i))
data=[row for row in data if row[1] >= 800000]
#data=data[data[:,1]>=800000,:]
data=np.array(data)
x = np.array(data[:,1])*0.001
y = data[:,3]
plt.plot(x[::2],y[::2],label=sys_name,color=colors[count])
count=count+1
plt.title('RMSD of the lasoo domain in the wild type and the I37R mutant (residues 1 to 44) ',fontsize=18)
plt.xlabel('time (ns)',fontsize=16)
plt.ylabel('lasoo domain RMSD ($\AA$)',fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim([0,5.5])
plt.legend()
color_vec=['blue']*70
color_vec[36]='orange'
res_results=np.zeros((2,70))
row_num=0
for i in system_list:
sys_name=i.split('_')[-4].upper()
print (sys_name)
data = np.loadtxt(str(i))
data=[row for row in data if row[1] >= 800000]
data=np.array(data)
resnum=len(data[0,3:-1])
x = data[:,1]*0.001
y = data[:,3]
resnum=len(data[0,3:-1])
data = np.loadtxt(str(i))
data=[row for row in data if row[1] >= 800000]
data=np.array(data)
resnum=len(data[0,3:-1])
blocks=chunkIt(np.array(data[:,0]),numblocks)
blocks=np.array(blocks).astype(int)
width = 0.45
inds = np.array(range(1,len(data[0,3:-1])+1))
# for j in range(numblocks):
block=blocks[-1]
resrmsd=np.zeros(resnum)
for k in range(resnum):
resrmsd[k]=np.mean(data[:,4+k])
#plt.bar(inds+j*width,resrmsd,width,color=color_vec)
res_results[row_num,:]=resrmsd
#plt.ylim([0,6])
# plt.ylabel('Residue specific RMSDs $(\AA)$')
# plt.xlabel('residue number')
print (str(i))
data = np.loadtxt(str(i))
rows=(data[:,1]>=800000)
data=data[rows,:]
resnum=len(data[0,3:-1])
x = data[:,1]*0.001
y = data[:,3]
sys_count=sys_count+1
row_num=row_num+1
plt.subplot2grid((2,1),(1,0),colspan=1,rowspan=1)
separation=0.1
#yellow bar on mutant results
#plt.bar(np.array([37])-width*0.50,1.05*np.amax(res_results),width,color='yellow')
#plt.bar(np.array([37])+width*0.50,1.05*np.amax(res_results),width,color='yellow')
plt.bar(inds-width*0.5,res_results[0,:],width,color='orange',label='I37R')
plt.bar(inds+width*0.5,res_results[1,:],width,color='tab:blue',label='WT')
plt.ylim([0,1.05*np.amax(res_results)])
plt.title('Residue Specific Lasoo Domain RMSDs (beyond 800ns)',fontsize=18)
plt.xlabel('residue number',fontsize=14)
plt.ylabel('CA RMSD ($\AA$)',fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim([0,50+0.49])
plt.legend()
plt.tight_layout()
plt.savefig('res_rmsd.pdf')
plt.show()
| 2.421875 | 2 |
src/langumo/building/mergence.py | affjljoo3581/langumo | 7 | 12794471 | """
Mergence
^^^^^^^^
All parsed plain text files should be merged into a single file to handle them
as an unified large corpus data.
.. autoclass:: MergeFiles
"""
from langumo.building import Builder
from langumo.utils import AuxiliaryFile, AuxiliaryFileManager, colorful
class MergeFiles(Builder):
"""Merge files into a single one.
Note:
All documents are separated by new-line character(``\\n``) and this
builder automatically appends the new-line character to avoid mixing
the last document of a file and the first document of another one.
"""
def build(self, afm: AuxiliaryFileManager, *inputs: AuxiliaryFile
) -> AuxiliaryFile:
merged = afm.create()
print(colorful.render(f'<r>[*]</r> merge <m>{len(inputs)}</m> files '
f'into one'))
with merged.open('wb') as dst, \
AuxiliaryFile.opens(inputs, 'rb') as srcs:
for src in srcs:
for line in src:
# Add break-line character to the end of text to avoid
# being merged with other line.
line += b'\n' if not line.endswith(b'\n') else b''
dst.write(line)
return merged
| 3.015625 | 3 |
src/test/kalman/numpy_kalman.py | chrisspen/homebot | 8 | 12794472 | """
From http://arxiv.org/pdf/1204.0375.pdf
"""
from numpy import dot, sum, tile, linalg
from numpy.linalg import inv
def kf_predict(X, P, A, Q, B, U):
"""
X: The mean state estimate of the previous step (k−1).
P: The state covariance of previous step (k−1).
A: The transition n × n matrix.
Q: The process noise covariance matrix.
B: The input effect matrix.
U: The control input.
"""
X = dot(A, X) + dot(B, U)
P = dot(A, dot(P, A.T)) + Q
    return (X, P)
def kf_update(X, P, Y, H, R):
"""
K: the Kalman Gain matrix
IM: the Mean of predictive distribution of Y
IS: the Covariance or predictive mean of Y
LH: the Predictive probability (likelihood) of measurement which is computed using the Python function gauss_pdf.
"""
IM = dot(H, X)
IS = R + dot(H, dot(P, H.T))
K = dot(P, dot(H.T, inv(IS)))
X = X + dot(K, (Y-IM))
P = P - dot(K, dot(IS, K.T))
LH = gauss_pdf(Y, IM, IS)
return (X,P,K,IM,IS,LH)
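# For reference, the steps implemented above in matrix form:
#   predict:  X = A X + B U,        P = A P A^T + Q
#   update:   IM = H X,             IS = R + H P H^T
#             K  = P H^T IS^{-1}
#             X <- X + K (Y - IM),  P <- P - K IS K^T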
def gauss_pdf(X, M, S):
    if M.shape[1] == 1:
        DX = X - tile(M, X.shape[1])
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(det(S))
        P = exp(-E)
    elif X.shape[1] == 1:
        DX = tile(X, M.shape[1]) - M
        E = 0.5 * sum(DX * (dot(inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(det(S))
        P = exp(-E)
    else:
        DX = X - M
        E = 0.5 * dot(DX.T, dot(inv(S), DX))
        E = E + 0.5 * M.shape[0] * log(2 * pi) + 0.5 * log(det(S))
        P = exp(-E)
return (P[0],E[0])
from numpy import *
from numpy.linalg import inv, det
from numpy.random import randn
#time step of mobile movement
dt = 0.1
# Initialization of state matrices
X = array([[0.0], [0.0], [0.1], [0.1]])
P = diag((0.01, 0.01, 0.01, 0.01))
A = array([[1, 0, dt , 0], [0, 1, 0, dt], [0, 0, 1, 0], [0, 0, 0, 1]])
Q = eye(X.shape[0])
B = eye(X.shape[0])
U = zeros((X.shape[0], 1))
# Measurement matrices
Y = array([[X[0,0] + abs(randn(1)[0])], [X[1,0] + abs(randn(1)[0])]])
H = array([[1, 0, 0, 0], [0, 1, 0, 0]])
R = eye(Y.shape[0])
# Number of iterations in Kalman Filter
N_iter = 50
# Applying the Kalman Filter
for i in arange(0, N_iter):
(X, P) = kf_predict(X, P, A, Q, B, U)
(X, P, K, IM, IS, LH) = kf_update(X, P, Y, H, R)
Y = array([[X[0,0] + abs(0.1 * randn(1)[0])],[X[1, 0] + abs(0.1 * randn(1)[0])]])
| 3.046875 | 3 |
setup.py | Yoctol/word-embedder | 3 | 12794473 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
long_description = open("README.md").read()
except IOError:
long_description = ""
setup(
name="word-embedder",
version="1.0.0",
description="Word Embedder",
license="MIT",
author="Solumilken",
packages=find_packages(),
install_requires=[
"mkdir-p>=0.1.1",
"numpy>=1.15.1",
"python-dotenv==0.9.1",
],
long_description=long_description,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
],
)
| 1.640625 | 2 |
kite-python/kite_ml/kite/exp/eligible_users/retention_rate.py | kiteco/kiteco-public | 17 | 12794474 | <filename>kite-python/kite_ml/kite/exp/eligible_users/retention_rate.py<gh_stars>10-100
from typing import Callable
import datetime
import pandas as pd
def naive_retention_fn(df: pd.DataFrame) -> pd.Series:
"""
Calculates naive retention rate based entirely on the kite_status data
:param df: a DataFrame containing user counts for the following columns
- [unactivated, activate, lost, dormant] - e.g. as returned by counts_for_daily_cohort()
:return: a Series, with the same index as df, containing the calculated retention rate for each row
"""
return df.active / (df.active + df.lost + df.dormant)
def true_retention_fn(
win_survey: pd.DataFrame,
active_days: int) -> Callable[[pd.DataFrame], pd.Series]:
"""
Returns a function that can calculate the "true retention" rate given a DataFrame of daily user counts by cohort
(e.g. as returned by counts_for_daily_cohort())
For the motivation behind finding the true retention rate, see:
https://kite.quip.com/TJxqAJs7vz05/Eligible-Users
...but the gist of it is that we want to find out the rate at which eligible users (i.e. those who are Python
coders who use a Kite-supported editor) retain.
To achieve this, we begin with the assumption that when looking at just the numbers from kite_status events,
our categorization of dormant and lost users may be incorrect (due to the fact that it may count ineligible users,
or issues with collecting metrics).
To fix this, we look at the results of the Windows user survey:
https://kite.quip.com/DgfvAoP0WOma/Windows-User-Survey
This survey includes questions about the last time a user coded in Python and the last time a user coded using
Kite. From these we can determine whether a user is truly lost or dormant, at least if we take that user's survey
responses at face value.
We then calculate the fractions of:
- lost (categorized by us) respondents who claim to have used Python but not Kite within the past <active_days>
(lost_lost_rate)
- lost (categorized by us) survey respondents who claim to have used both Kite and Python within the past
<active_days> (lost_active_rate)
- dormant (categorized by us) survey respondents who claim to have used Python but not Kite within the past
<active_days> (dormant_lost_rate)
    - dormant (categorized by us) survey respondents who claim to have used both Kite and Python within the past
      <active_days> (dormant_active_rate)
We then apply corrections to the retention rate by redistributing our lost and dormant users according to these
calculated rates, using the assumption that this rate holds for every measured cohort.
active_users =
active_count + (dormant_count * dormant_active_rate) + (lost_count * lost_active_rate)
churned_users =
(dormant_count * dormant_lost_rate) + (lost_count * lost_lost_rate)
true_retention_rate = active_users / (active_users + churned_users)
    :param win_survey: windows survey result, as returned by windows_survey.get_responses()
:param active_days: the active-day definition (the "n" in "n-day active")
:return: a function that operates on a DataFrame containing user counts for the following columns
- [unactivated, activate, lost, dormant] and returns a series containing just one column with the retention rate
"""
# determine what are the responses to the "last used Kite" / "last used Python" questions that indicate the user
# is still using Kite/Python within the desired window
if active_days == 14:
active_choices = {'14'}
elif active_days == 30:
active_choices = {'14', '30'}
elif active_days == 90:
active_choices = {'14', '30', '90'}
else:
raise ValueError("active_days needs to be in {14,30,90}")
# we only consider respondents who answered both of the "last used Kite"/ "last used Python" questions
resps = win_survey[(win_survey.last_used_kite != '') & (win_survey.last_used_py != '')]
lost_resps = resps[resps.last_day < resps.started - datetime.timedelta(days=active_days)]
lost_active = lost_resps[
lost_resps.last_used_kite.isin(active_choices) & lost_resps.last_used_py.isin(active_choices)]
lost_active_rate = len(lost_active) / len(lost_resps)
lost_lost = lost_resps[
~lost_resps.last_used_kite.isin(active_choices) & lost_resps.last_used_py.isin(active_choices)]
lost_lost_rate = len(lost_lost) / len(lost_resps)
dormant_resps = resps[(resps.last_day >= resps.started - datetime.timedelta(days=active_days)) &
(resps.last_py_event < resps.started - datetime.timedelta(days=active_days))]
dormant_active = dormant_resps[
dormant_resps.last_used_kite.isin(active_choices) & dormant_resps.last_used_py.isin(active_choices)]
dormant_active_rate = len(dormant_active) / len(dormant_resps)
dormant_lost = dormant_resps[
~dormant_resps.last_used_kite.isin(active_choices) & dormant_resps.last_used_py.isin(active_choices)]
dormant_lost_rate = len(dormant_lost) / len(dormant_resps)
def retention_fn(df: pd.DataFrame) -> pd.Series:
active = df.active + (df.dormant * dormant_active_rate) + (df.lost * lost_active_rate)
churned = (df.dormant * dormant_lost_rate) + (df.lost * lost_lost_rate)
return active / (active + churned)
return retention_fn
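# Hedged usage sketch (the DataFrame names below are illustrative, not defined in this module):
#
#     retention = true_retention_fn(win_survey=survey_responses, active_days=30)
#     cohort_counts['true_retention'] = retention(cohort_counts)
#
# where cohort_counts holds per-cohort [unactivated, active, lost, dormant] user counts,
# e.g. as returned by counts_for_daily_cohort().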
| 3 | 3 |
tests/test_resources.py | paulopes/panel-components | 4 | 12794475 | """The purpose of this module is to test the TemporaryResources context manager
The purpose of the TemporaryResources context manager is to enable using temporary, specific
configuration of resources when creating a custom Template.
If you use the global configuration `pn.config` for your templates you will include the same
css and js files in all templates. This is problematic if you want different templates, like for
example a light template, a dark template, a bootstrap template, a material template, a template
with Plotly Plots, a template without Plotly plots etc.
"""
import panel as pn
import pytest
from panel_components.resources import TemporaryResources
# pylint: disable=missing-function-docstring
@pytest.fixture(scope="function", autouse=True)
def clear_config_except_panel_css():
"""Reset pn.config except for panel css"""
# pylint: disable=protected-access
pn.config.raw_css = []
pn.config.js_files = {}
pn.config.css_files = [
file for file in pn.config.css_files if TemporaryResources._is_panel_style_file(file)
]
@pytest.fixture()
def clear_config():
"""Reset pn.config"""
pn.config.raw_css = []
pn.config.js_files = {}
pn.config.css_files = []
def _contains_bokeh_and_panel_resources(text):
return (
"bokeh-" in text
and "bokeh-widgets" in text
and "bokeh-tables" in text
and ".panel-widget-box"
)
def test_does_not_include_pn_config_raw_css():
# Given
pre_raw_css = "body {background: black;"
# When
pn.config.raw_css.append(pre_raw_css)
backup = pn.config.raw_css
with TemporaryResources():
text = pn.io.resources.Resources().render()
# Then
assert pre_raw_css not in text
assert pn.config.raw_css == backup
assert _contains_bokeh_and_panel_resources(text)
def test_does_not_include_pn_config_css_files():
# Given
pre_css_file = "https://somedomain.com/test.css"
# When
pn.config.css_files.append(pre_css_file)
backup = pn.config.css_files
with TemporaryResources():
text = pn.io.resources.Resources().render()
# Then
assert pre_css_file not in text
assert pn.config.css_files == backup
assert _contains_bokeh_and_panel_resources(text)
def test_does_not_include_pn_config_js_files():
# Given
pre_js = "http://some/domain.com/test.js"
# When
pn.config.js_files = {"somejs": pre_js}
backup = pn.config.js_files
with TemporaryResources():
text = pn.io.resources.Resources().render()
# Then
assert pre_js not in text
assert pn.config.js_files == backup
assert _contains_bokeh_and_panel_resources(text)
def test_does_not_include_pn_extension():
# Given
pre_extension = "plotly"
# When
pn.extension(pre_extension)
with TemporaryResources():
text = pn.io.resources.Resources().render()
# Then
assert pre_extension not in text
assert _contains_bokeh_and_panel_resources(text)
def test_includes_template_extension():
extension = "katex"
with TemporaryResources(extensions={extension}):
text = pn.io.resources.Resources().render()
assert extension in text
assert _contains_bokeh_and_panel_resources(text)
def test_includes_template_raw_css():
raw_css = "body {background: black;"
with TemporaryResources(raw_css=[raw_css]):
text = pn.io.resources.Resources().render()
assert raw_css in text
assert _contains_bokeh_and_panel_resources(text)
def test_includes_template_css_files():
css_file = "https://somedomain.com/test.css"
with TemporaryResources(css_files=[css_file]):
text = pn.io.resources.Resources().render()
assert css_file in text
assert _contains_bokeh_and_panel_resources(text)
def test_includes_template_js_files():
js_file = "http://some/domain.com/test.js"
with TemporaryResources(js_files={"somejs": js_file}):
text = pn.io.resources.Resources().render()
assert js_file in text
assert _contains_bokeh_and_panel_resources(text)
def test_can_exclude_panel_css():
with TemporaryResources(include_panel_css=False):
text = pn.io.resources.Resources().render()
assert ".panel-widget-box" not in text
def test_complex_use_case():
# Given
pre_raw_css = "body {background: black;"
pre_css_file = "https://somedomain.com/test.css"
pre_js = "http://some/domain.com/test.js"
pre_extension = "plotly"
extension = "katex"
# When
pn.extension(pre_extension)
pn.config.raw_css.append(pre_raw_css)
pn.config.css_files.append(pre_css_file)
pn.config.js_files = {"somejs": pre_js}
backup_css_files = pn.config.css_files
with TemporaryResources(extensions={extension}, include_panel_css=False):
text = pn.io.resources.Resources().render()
# Then
assert "bokeh-" in text
assert "bokeh-widgets" in text
assert "bokeh-tables" in text
assert ".panel-widget-box" not in text
assert extension in text
assert pre_raw_css not in text
assert pre_css_file not in text
assert pre_js not in text
assert pre_extension not in text
assert pn.config.raw_css == [pre_raw_css]
assert pn.config.js_files == {"somejs": pre_js}
assert pn.config.css_files == backup_css_files
| 2.375 | 2 |
tests/unit/modules/test_create.py | MatthieuBlais/freeldep | 0 | 12794476 | <reponame>MatthieuBlais/freeldep
import pytest
from freeldep.modules.create import validate_bucket
from freeldep.modules.create import validate_emails
from freeldep.modules.create import validate_name
from freeldep.modules.create import validate_registry
def test_validate_name():
assert validate_name("sdfsdfdfsdf") == "sdfsdfdfsdf"
assert (
validate_name("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
== "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
)
with pytest.raises(ValueError):
validate_name("sdfsdfd223!#fsdf") == "sdfsdfdfsdf"
def test_validate_bucket():
assert validate_bucket("test", "testbucket") == "testbucket"
with pytest.raises(ValueError):
validate_bucket("test", "testbucket@")
bucket = validate_bucket("test", None)
assert bucket.startswith("test")
assert bucket.startswith("test-deployer-artifact-bucket-")
assert len(bucket.split("-")[-1]) == 6
def test_validate_registry():
assert validate_registry("test", "testregistry") == "testregistry"
assert (
validate_registry(
"test",
"testregistrysssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss"
"sssssssssssssssssssssssss",
)
== "testregistrysssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss"
"sssssssssssssssssssss"
)
with pytest.raises(ValueError):
validate_bucket("test", "testregistry@")
registry = validate_registry("test", None)
assert registry.startswith("test")
def test_validate_emails():
assert validate_emails("<EMAIL>")[0] == "<EMAIL>"
assert len(validate_emails("<EMAIL>,<EMAIL>")) == 2
assert len(validate_emails("<EMAIL>,test2")) == 1
| 2.15625 | 2 |
class9/exercise4.py | papri-entropy/pyplus | 0 | 12794477 | #!/usr/bin/env python
"""
4a. Add nxos1 to your my_devices.py file.
Ensure that you include the necessary information to set the NX-API port to 8443.
This is done using 'optional_args' in NAPALM so you should have the following key-value pair defined:
"optional_args": {"port": 8443}
4b. Create a new function named 'create_checkpoint'.
Add this function into your my_functions.py file.
This function should take one argument, the NAPALM connection object.
This function should use the NAPALM _get_checkpoint_file() method
to retrieve a checkpoint from the NX-OS device.
It should then write this checkpoint out to a file.
Recall that the NX-OS platform requires a 'checkpoint' file
for configuration replace operations.
Using this new function, retrieve a checkpoint
from nxos1 and write it to the local file system.
4c. Manually copy the saved checkpoint to a new file
and add an additional loopback interface to the configuration.
4d. Create a Python script that stages a complete configuration replace operation
(using the checkpoint file that you just retrieved and modified).
Once your candidate configuration is staged perform a compare_config (diff)
on the configuration to see your pending changes.
After the compare_config is complete, then use the discard_config() method
to eliminate the pending changes.
Next, perform an additional compare_config (diff) to verify that you have
no pending configuration changes.
Do not actually perform the commit_config as part of this exercise.
"""
import my_devices
from pprint import pprint
from napalm import get_network_driver
from my_functions import napalm_conn, create_backup, create_checkpoint
# Disable Self-signed Certificate Warnings
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
if __name__=="__main__":
# Creating the nxos napalm connection
nxos1 = my_devices.nxos1
nxos1_hostname = nxos1['hostname']
device_conn = napalm_conn(nxos1)
print("#" * 50)
print(f"Printing {nxos1_hostname} napalm connection: ")
print("#" * 50)
print(device_conn)
# Creating the nxos checkpoint file
filename = f"{nxos1_hostname}_checkpoint"
checkpoint = create_checkpoint(device_conn, filename)
# Napalm Config Replace staging
device_conn.load_replace_candidate(filename=f"{nxos1_hostname}_config")
print("#" * 50)
print(f"Printing {nxos1_hostname} DIFFS candidate vs running before commiting: ")
print("#" * 50)
print(device_conn.compare_config())
device_conn.discard_config()
print("#" * 50)
print(f"Printing {nxos1_hostname} DIFFS candidate vs running after discarding the staged candidate: ")
print("#" * 50)
print(device_conn.compare_config())
device_conn.close()
| 2.515625 | 3 |
zeusproject/templates/app/users/controllers.py | zerossB/zeus | 6 | 12794478 | # -*- coding: utf-8 -*-
"""
{{NAMEPROJECT}}.users.controllers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{{NAME}} user controllers module
:copyright: (c) {{YEAR}} by {{AUTHOR}}.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app, render_template, Blueprint
from flask_security import login_required
blueprint = Blueprint('users', __name__, url_prefix='/users')
@blueprint.route('/profile')
@login_required
def profile():
"""return user profle."""
current_app.logger.debug(u'Get profile user.')
return render_template('users/profile.html')
| 2.015625 | 2 |
src/pi_drone_server/app.py | JoshuaBillson/PiDroneServer | 0 | 12794479 | # Ros Client
import rospy
# Standard Python Libraries
import threading
import os
import time
# Messages
from geometry_msgs.msg import Twist
# Third Party Libraries
from flask import Flask, request, Response
from pi_drone_server.html import html
from pi_drone_server.camera import Camera
# Globals
current_speed = 0
current_turn = 0
ping_time = 0
write_event = threading.Event()
app = Flask(__name__)
# Constants
TIMEOUT = 1.5 # Seconds
direction = rospy.Publisher("robot_twist", Twist, queue_size=10)
@app.route('/')
def view():
return html
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/control")
def control():
global direction, current_speed, current_turn, write_event
# Decode Speed
if 'speed' in request.args and int(request.args["speed"]) != current_speed:
        current_speed = int(request.args["speed"])
else:
current_speed = 0
# Decode Turn
if 'turn' in request.args and int(request.args["turn"]) != current_turn:
        current_turn = int(request.args["turn"])
else:
current_turn = 0
# Signal To ros_thread That New Directions Have Been Received
write_event.set()
# Return Code 204
return ('', 204)
@app.route("/ping")
def ping():
global ping_time
ping_time = time.time()
return ('', 204)
def timeout_thread():
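    # Safety stop: if no /ping has arrived within TIMEOUT seconds, zero the speed and turn.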
global ping_time, current_speed, current_turn, write_event, TIMEOUT
time.sleep(1) # We need to wait for the rospy node to initialize before running.
while not rospy.is_shutdown():
if (time.time() - ping_time) > TIMEOUT:
current_speed = 0
current_turn = 0
write_event.set()
time.sleep(0.1)
def ros_thread():
global current_speed, current_turn, write_event, direction
rospy.init_node('pi_drone_server', disable_signals=True)
while not rospy.is_shutdown():
write_event.wait()
msg = Twist()
msg.linear.x = float(current_speed)
msg.angular.z = float(current_turn)
direction.publish(msg)
write_event.clear()
def pi_drone_server():
"""Executable"""
threading.Thread(target=ros_thread).start()
threading.Thread(target=timeout_thread).start()
app.run(host="0.0.0.0", threaded=True)
| 2.515625 | 3 |
tests/test_reporter_advanced_ogr.py | dersteppenwolf/registrant | 42 | 12794480 | <reponame>dersteppenwolf/registrant<filename>tests/test_reporter_advanced_ogr.py
# -*- coding: UTF-8 -*-
"""Advanced tests generating html files for a geodatabase.
Geodatabase contains domains, tables, and feature classes.
This test case is supposed to be run with a Python installation
that have GDAL installed because OGR module is being used.
"""
from __future__ import print_function
import os
import unittest
import pkgutil
from context import (
registrant,
prepare_test,
PYTHON_VERSION,
NO_OGR_ENV_MESSAGE,
)
import html_parsers
########################################################################
class SimpleGeodatabase(unittest.TestCase):
"""Full test case with a complete geodatabase.
Geodatabase contains has domains, tables, and feature classes.
"""
# ---------------------------------------------------------------------
def setUp(self):
"""Set up the test context.
Create a file geodatabase from .xml schema file and
load .json look-up data.
"""
ogr_loader = pkgutil.find_loader('ogr')
if not ogr_loader:
self.skipTest(NO_OGR_ENV_MESSAGE)
self.in_gdb, self.out_report_folder, self.json_results = prepare_test(
'Advanced_ogr')
# ---------------------------------------------------------------------
def test_domains(self):
"""Test geodatabase report for domains."""
test_name = self.id().split('.')[-1]
self.reporter = registrant.Reporter(
gdb_path=self.in_gdb,
out_report_folder_path=os.path.join(self.out_report_folder,
test_name + PYTHON_VERSION),
)
self.reporter.domains2html()
print(self.reporter.report_file_path)
self.assertEqual(
html_parsers.parse_domains_from_html(
html_file=self.reporter.report_file_path,
json_file=self.json_results,
), (True, True))
# ---------------------------------------------------------------------
def test_tables(self):
"""Test geodatabase report for tables."""
test_name = self.id().split('.')[-1]
self.reporter = registrant.Reporter(
gdb_path=self.in_gdb,
out_report_folder_path=os.path.join(self.out_report_folder,
test_name + PYTHON_VERSION),
)
self.reporter.tables2html()
print(self.reporter.report_file_path)
self.assertEqual(
html_parsers.parse_tables_from_html(
html_file=self.reporter.report_file_path,
json_file=self.json_results,
), (True, True))
# ---------------------------------------------------------------------
def test_fcs(self):
"""Test geodatabase report for feature classes."""
test_name = self.id().split('.')[-1]
self.reporter = registrant.Reporter(
gdb_path=self.in_gdb,
out_report_folder_path=os.path.join(self.out_report_folder,
test_name + PYTHON_VERSION),
)
self.reporter.fcs2html()
print(self.reporter.report_file_path)
self.assertEqual(
html_parsers.parse_fcs_from_html(
html_file=self.reporter.report_file_path,
json_file=self.json_results,
), (True, True))
if __name__ == '__main__':
unittest.main()
| 2.28125 | 2 |
links.py | wbrpisarev/barefoot_weather_server | 0 | 12794481 | # -*- mode:python; coding:utf-8; -*-
from sqlalchemy import create_engine
from html_templates import html_begin, html_end, html_links_li
from mysql import mysql_connect_data
__all__ = ["links"]
def links(lang, connect_data=mysql_connect_data):
e = create_engine(connect_data)
if lang not in ("ru", "en"):
lang = "uk"
link_list = [row for row in
e.execute("select uri, desc_{l} from links".format(l=lang))]
html = html_begin
for record in link_list:
html += html_links_li.format(link=record[0], desc=record[1])
return html + html_end
| 2.65625 | 3 |
zepid/calc/__init__.py | darrenreger/zEpid | 0 | 12794482 | from .utils import (risk_ci, incidence_rate_ci, risk_ratio, risk_difference, number_needed_to_treat, odds_ratio,
incidence_rate_ratio, incidence_rate_difference, attributable_community_risk,
population_attributable_fraction, probability_to_odds, odds_to_probability, counternull_pvalue,
semibayes, sensitivity, specificity, ppv_converter, npv_converter, screening_cost_analyzer,
rubins_rules, s_value)
| 0.984375 | 1 |
storage/storage_item.py | DiPaolo/watchdog-yt-uploader | 0 | 12794483 | # represents items that used by Storage;
# can be either a directory or media file, or non-media file
import abc
import hashlib
import os
import uuid
from enum import Enum
class StorageItemStatus(Enum):
UNKNOWN = 'Unknown'
ON_TARGET = 'On Target'
UPLOADING = 'Uploading'
UPLOAD_FAILED = 'Upload Failed'
UPLOADED = 'Uploaded'
class StorageItem(abc.ABC):
def __init__(self, path: str, uuid_str: str = None):
self._uuid = uuid_str if uuid_str else str(uuid.uuid1())
self._path = os.path.abspath(path)
self._hash = None
self._status = StorageItemStatus.UNKNOWN
@abc.abstractmethod
def is_dir(self):
pass
def uuid(self):
return self._uuid
def path(self):
return self._path
def hash(self):
return self._hash
def status(self):
return self._status
def set_status(self, status: StorageItemStatus):
self._status = status
def _calc_hash(self):
if self._path is None or self._path == '':
self._hash = None
return
h = hashlib.sha256()
if not self.is_dir():
b = bytearray(256 * 1024)
mv = memoryview(b)
with open(self._path, 'rb', buffering=0) as f:
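                # Stream the file through SHA-256 in 256 KiB chunks; iter(..., 0) stops at EOF.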
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
self._hash = h.hexdigest()
else:
            h.update(os.path.abspath(self._path).encode('utf-8'))
digest = h.digest()
digest_str = ''
for b in digest:
digest_str += f'{b:02x}'
self._hash = digest_str
# class Directory(StorageItem):
# def __init__(self, path: str):
# super(Directory, self).__init__(path)
#
# def is_dir(self):
# return True
class File(StorageItem):
def __init__(self, path: str, uuid_str: str = None):
super(File, self).__init__(path, uuid_str)
def is_dir(self):
return False
class MediaFile(File):
def __init__(self, path: str, uuid_str: str = None):
super(MediaFile, self).__init__(path, uuid_str)
self._calc_hash()
| 2.8125 | 3 |
AES/AESCipherText.py | HolzerSoahita/Cracking_code_python | 1 | 12794484 | from aes import AES
from hmac import new as new_hmac, compare_digest
from hashlib import pbkdf2_hmac
import os
AES_KEY_SIZE = 16
HMAC_KEY_SIZE = 16
IV_SIZE = 16
SALT_SIZE = 16
HMAC_SIZE = 32
def get_key_iv(password, salt, workload=100000):
"""
Stretches the password and extracts an AES key, an HMAC key and an AES
initialization vector.
"""
stretched = pbkdf2_hmac('sha256', password, salt,
workload, AES_KEY_SIZE + IV_SIZE + HMAC_KEY_SIZE)
aes_key, stretched = stretched[:AES_KEY_SIZE], stretched[AES_KEY_SIZE:]
hmac_key, stretched = stretched[:HMAC_KEY_SIZE], stretched[HMAC_KEY_SIZE:]
iv = stretched[:IV_SIZE]
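    # The PBKDF2 output is split into: 16-byte AES key | 16-byte HMAC key | 16-byte IV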
return aes_key, hmac_key, iv
def encrypt(key, plaintext, workload=100000):
"""
Encrypts `plaintext` with `key` using AES-128, an HMAC to verify integrity,
and PBKDF2 to stretch the given key.
The exact algorithm is specified in the module docstring.
"""
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(plaintext, str):
plaintext = plaintext.encode('utf-8')
salt = os.urandom(SALT_SIZE)
key, hmac_key, iv = get_key_iv(key, salt, workload)
ciphertext = AES(key).encrypt_cbc(plaintext, iv)
hmac = new_hmac(hmac_key, salt + ciphertext, 'sha256').digest()
assert len(hmac) == HMAC_SIZE
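    # Returned layout: 32-byte HMAC-SHA256 tag || 16-byte salt || AES-128-CBC ciphertext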
return hmac + salt + ciphertext
def decrypt(key, ciphertext, workload=100000):
"""
Decrypts `ciphertext` with `key` using AES-128, an HMAC to verify integrity,
and PBKDF2 to stretch the given key.
The exact algorithm is specified in the module docstring.
"""
assert len(
ciphertext) % 16 == 0, "Ciphertext must be made of full 16-byte blocks."
assert len(ciphertext) >= 32, """
Ciphertext must be at least 32 bytes long (16 byte salt + 16 byte block). To
encrypt or decrypt single blocks use `AES(key).decrypt_block(ciphertext)`.
"""
if isinstance(key, str):
key = key.encode('utf-8')
hmac, ciphertext = ciphertext[:HMAC_SIZE], ciphertext[HMAC_SIZE:]
salt, ciphertext = ciphertext[:SALT_SIZE], ciphertext[SALT_SIZE:]
key, hmac_key, iv = get_key_iv(key, salt, workload)
expected_hmac = new_hmac(hmac_key, salt + ciphertext, 'sha256').digest()
assert compare_digest(
hmac, expected_hmac), 'Ciphertext corrupted or tampered.'
return AES(key).decrypt_cbc(ciphertext, iv)
def benchmark():
key = b'P' * 16
message = b'M' * 16
aes = AES(key)
for i in range(30000):
aes.encrypt_block(message)
__all__ = [encrypt, decrypt, AES]
# Running the AES-128
if __name__ == '__main__':
key = 'master key'
message = 'a secret message'
ciphertext = encrypt(key, message)
print("Cipher text : {}".format(ciphertext))
plaintext = decrypt(key, ciphertext)
print("Plaintext : {}".format(str(plaintext, 'utf-8')))
| 3.296875 | 3 |
scripts/lastdata.py | winkste/rki2_scraper | 0 | 12794485 | celle_last_inc =121.53
noh_last_inc =73.25 | 0.605469 | 1 |
insta/tests.py | chriskaringeg/CRIMMZ-GRAM | 0 | 12794486 | from django.test import TestCase
from .models import Image,Profile
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTestCase(TestCase):
# SetUp method
def setUp(self):
#creating a user instance
self.user = User(username="chris",email="<EMAIL>",password="<PASSWORD>")
self.image = Profile(user=self.user,profile_avatar="ben_H62Kawu.jpeg",bio="Rolls-Royce Wraith")
def tearDown(self):
User.objects.all().delete()
Image.objects.all().delete()
# Testing Instance
def test_instance(self):
        self.assertTrue(isinstance(self.image, Profile))
def test_save_profile(self):
new_user = User(id=1,username="chris",email="<EMAIL>",password="<PASSWORD>")
new_user.save()
users = User.objects.all()
self.assertTrue(len(users)>=1)
def test_delete_profile(self):
new_user = User(id=1,username="chris",email="<EMAIL>",password="<PASSWORD>")
new_user.delete()
users = User.objects.all()
self.assertTrue(len(users)<=0)
class ImageTestCase(TestCase):
# SetUp method
def setUp(self):
#creating a user instance
self.user = User(username="chris",email="<EMAIL>",password="<PASSWORD>")
self.image = Image(image="default.jpg",tag_someone="ben2_2HRlWyC.jpeg",image_caption="ai at its best")
def tearDown(self):
User.objects.all().delete()
Image.objects.all().delete()
# Testing Instance
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
def test_save_image(self):
new_image =Image(image="default.jpg",tag_someone="ben2_2HRlWyC.jpeg",image_caption="ai at its best")
new_image.save()
images = Image.objects.all()
self.assertTrue(len(images)>=1)
def test_delete_image(self):
new_image =Image(id=1,image="default.jpg",tag_someone="ben2_2HRlWyC.jpeg",image_caption="ai at its best")
new_image.delete()
images = Image.objects.all()
self.assertTrue(len(images)==0) | 2.625 | 3 |
tests/test_dummy.py | rmflight/GOcats | 10 | 12794487 | import pytest
def test_run_script():
# run 1
# run 2
# assert
assert 1 == 1
| 1.625 | 2 |
metareserve_geni/internal/gni/py2/util/geni_util.py | MariskaIJpelaar/metareserve-GENI | 3 | 12794488 | import geni.util
def get_context():
try:
return geni.util.loadContext()
except IOError as e: # File not found. No credentials loaded?
print('ERROR: Could not load context: ', e)
print('''Are there any credentials available? If not, check
https://docs.emulab.net/geni-lib/intro/credentials.html
Specifically, for cloudlab users, use:
build-context --type cloudlab --cert <cert> --pubkey <pubkey> --project <name>
With e.g:
<cert> = ~/.ssh/cloudlab.pem (Cert must not be protected by a password!)
<pubkey> = ~/.ssh/geni.rsa.pub (Preferably without a password.)
<name> = skyhook (Note: must be all lowercase!)
''')
return None
| 2.28125 | 2 |
src/data/data_parsers.py | kanmaytacker/heart-net | 0 | 12794489 | <reponame>kanmaytacker/heart-net
import os
import h5py
from click import progressbar
import numpy as np
def read_skeleton_file(file_path: str):
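    # The .skeleton text layout parsed below: frame count, then per frame a body count,
    # per body a metadata line, a joint count, and one line of 12 values per joint.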
with open(file_path, 'r') as f:
skeleton_sequence = {'numFrame': int(f.readline()), 'frameInfo': []}
for _ in range(skeleton_sequence['numFrame']):
frame_info = {'numBody': int(f.readline()), 'bodyInfo': []}
for _ in range(frame_info['numBody']):
body_info_key = [
'bodyID', 'clippedEdges', 'handLeftConfidence',
'handLeftState', 'handRightConfidence', 'handRightState',
'isResticted', 'leanX', 'leanY', 'trackingState'
]
body_info = {
k: float(v)
for k, v in zip(body_info_key, f.readline().split())
}
body_info['numJoint'] = int(f.readline())
body_info['jointInfo'] = []
for _ in range(body_info['numJoint']):
joint_info_key = [
'x', 'y', 'z', 'depthX', 'depthY', 'colorX', 'colorY',
'orientationW', 'orientationX', 'orientationY',
'orientationZ', 'trackingState'
]
joint_info = {
k: float(v)
for k, v in zip(joint_info_key, f.readline().split())
}
body_info['jointInfo'].append(joint_info)
frame_info['bodyInfo'].append(body_info)
skeleton_sequence['frameInfo'].append(frame_info)
return skeleton_sequence
def parse_skeleton_data(file_path: str, joints: int = 25, persons: int = 2) -> np.ndarray:
sequence_data = read_skeleton_file(file_path)
data = np.zeros((3, sequence_data['numFrame'], joints, persons), dtype=np.float32)
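    # Output layout: (x/y/z channel, frame, joint, body) -> shape (3, numFrame, joints, persons)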
for frame_number, frame in enumerate(sequence_data['frameInfo']):
for body_number, body in enumerate(frame['bodyInfo']):
for joint_number, joint in enumerate(body['jointInfo']):
if body_number < persons and joint_number < joints:
data[:, frame_number, joint_number, body_number] = [joint['x'], joint['y'], joint['z']]
return np.around(data, decimals=3)
def generate_skeleton_dataset(data_path: str, output_path: str) -> None:
target_file_path = f"{output_path}/skeleton.h5"
with h5py.File(target_file_path, "w") as target_file:
progress_bar = progressbar(iterable=None, length=len(next(os.walk(data_path))[2]))
for file_name in os.listdir(data_path):
sequence_name = os.path.splitext(file_name)[0]
skeleton_data = parse_skeleton_data(f"{data_path}/{file_name}")
f = open(output_path + "log.txt", "w+")
f.write(sequence_name)
f.write("\r\n")
f.close()
target_file.create_group(sequence_name).create_dataset("skeleton", data=skeleton_data)
progress_bar.update(1)
| 2.625 | 3 |
DataLoader.py | sgrieve/SpatialEFD | 2 | 12794490 | import numpy as np
def LoadData(FileName):
'''
Loads hollow data into structured numpy array of floats and returns a tuple
of column headers along with the structured array.
'''
data = np.genfromtxt(FileName, names=True, delimiter=',')
return data.dtype.names, data
def SegmentDataByAspect(FileName):
'''
Loads hollow data into structured numpy array of floats, and splits the
data into separate structured arrays by aspect band and returns a tuple
of column headers along with the structured arrays.
'''
Headers, A = LoadData(FileName)
NE = A[(A['Aspect'] >= 0) & (A['Aspect'] <= 85)]
SE = A[(A['Aspect'] > 85) & (A['Aspect'] <= 165)]
E = A[(A['Aspect'] >= 0) & (A['Aspect'] <= 165)]
W = A[(A['Aspect'] > 165)]
return Headers, NE, SE, E, W
def DataFilter(DataFile, Parameter, Value):
'''
    Split hollows around Value of a given property. Returns Small and
    Large, two lists of IDs corresponding to hollows below and at/above
    Value, respectively.
'''
Headers, A = LoadData(DataFile)
Small = A[(A[Parameter] < Value)]['ID']
Large = A[(A[Parameter] >= Value)]['ID']
return Small, Large
def VegDataFilter(DataFile):
'''
Split hollows into vegetation categories of a given property. returns
4 lists of IDs corresponding to specific vegetation types
'''
Headers, A = LoadData(DataFile)
a = A[(A['Veg'] == 1)]['ID']
b = A[(A['Veg'] == 2)]['ID']
c = A[(A['Veg'] == 3)]['ID']
d = A[(A['Veg'] == 4)]['ID']
return a, b, c, d
| 3.328125 | 3 |
fjlt/__init__.py | gabobert/fast-jlt | 8 | 12794491 | import os
from .version import __version__
def get_include():
''' Path of cython headers for compiling cython modules '''
return os.path.dirname(os.path.abspath(__file__))
| 1.640625 | 2 |
extra-packages/pyperl-1.0.1d/t/wantarray.py | UfSoft/ISPManCCP | 0 | 12794492 | <filename>extra-packages/pyperl-1.0.1d/t/wantarray.py
import perl
#if perl.MULTI_PERL:
# print "1..0"
# raise SystemExit
print "1..11"
perl.eval("""
sub foo {
if (wantarray) {
return "array";
}
elsif (defined wantarray) {
return "scalar";
}
else {
return;
}
}
""")
foo = perl.eval("\&foo")
testno = 1
def expect(res, expect):
global testno
if res != expect:
print "Expected", repr(expect), "got", repr(res)
print "not",
print "ok", testno
testno = testno + 1
void = None
scalar = "scalar"
array = ("array",)
expect(foo(), scalar)
expect(foo(__wantarray__ = 1), array)
expect(foo(__wantarray__ = None), void)
foo.__wantarray__ = 1;
expect(foo(), array)
expect(foo(__wantarray__ = 0), scalar)
expect(foo(__wantarray__ = None), void)
foo.__wantarray__ = None
expect(foo(), void)
expect(perl.call("foo"), scalar)
expect(perl.call_tuple("foo"), array)
expect(perl.call("foo", __wantarray__ = 1), array)
expect(perl.call_tuple("foo", __wantarray__ = 0), scalar)
| 2.453125 | 2 |
PythonExercicios/Mundo 2/8_estrutura_de_repeticao_for/ex050.py | GuilhermoCampos/Curso-Python3-curso-em-video | 0 | 12794493 | <gh_stars>0
d = 0
co = 0
for c in range(1, 7):
valor = int(input('Digite o {}º Valor: '.format(c)))
if valor % 2 == 0:
d += valor
co += 1
print('Você informou {} números PARES e a soma foi {}'.format(co, d))
| 3.53125 | 4 |
lib/datasets/interior_net.py | tim885/DeepDepthRefiner | 4 | 12794494 | import os
from os.path import join
import cv2
import pickle
import torch
import numpy as np
import pandas as pd
import torch.utils.data as data
class InteriorNet(data.Dataset):
def __init__(self, root_dir, label_name='_raycastingV2',
pred_dir='pred', method_name='sharpnet_pred',
gt_dir='data', depth_ext='-depth-plane.png', normal_ext='-normal.png', im_ext='-rgb.png',
label_dir='label', label_ext='-order-pix.npy'):
super(InteriorNet, self).__init__()
self.root_dir = root_dir
self.label_name = label_name
self.method_name = method_name
self.im_ext = im_ext
self.gt_dir = gt_dir
self.label_dir = label_dir
self.pred_dir = pred_dir
self.depth_ext = depth_ext
self.normal_ext = normal_ext
self.label_ext = label_ext
self.df = pd.read_csv(join(root_dir, 'InteriorNet.txt'))
def __len__(self):
return len(self.df)
def __getitem__(self, index):
depth_gt, depth_pred, label, normal, img = self._fetch_data(index)
depth_gt = torch.from_numpy(np.ascontiguousarray(depth_gt)).float().unsqueeze(0)
depth_pred = torch.from_numpy(np.ascontiguousarray(depth_pred)).float().unsqueeze(0)
label = torch.from_numpy(np.ascontiguousarray(label)).float().permute(2, 0, 1)
normal = torch.from_numpy(np.ascontiguousarray(normal)).float().permute(2, 0, 1)
img = torch.from_numpy(np.ascontiguousarray(img)).float().permute(2, 0, 1)
return depth_gt, depth_pred, label, normal, img
def _fetch_data(self, index):
# fetch predicted depth map in meters
depth_pred_path = join(self.root_dir, self.pred_dir, self.df.iloc[index]['scene'],
self.method_name, 'data', '{}.pkl'.format(self.df.iloc[index]['image']))
with open(depth_pred_path, 'rb') as f:
depth_pred = pickle.load(f)
# fetch ground truth depth map in meters
depth_gt_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.depth_ext))
if not os.path.exists(depth_gt_path):
print(depth_gt_path)
depth_gt = cv2.imread(depth_gt_path, -1) / 1000
# fetch normal map in norm-1 vectors
normal_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.normal_ext))
normal = cv2.imread(normal_path, -1) / (2 ** 16 - 1) * 2 - 1
normal = normal[:, :, ::-1]
# fetch rgb image
image_path = join(self.root_dir, self.gt_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.im_ext))
img = cv2.imread(image_path, -1) / 255
img = img[:, :, ::-1]
# fetch occlusion orientation labels
label_path = join(self.root_dir, self.label_dir,
'{}{}'.format(self.df.iloc[index]['scene'], self.label_name),
'{:04d}{}'.format(self.df.iloc[index]['image'], self.label_ext))
label = np.load(label_path)
return depth_gt, depth_pred, label, normal, img
if __name__ == "__main__":
root_dir = '/space_sdd/InteriorNet'
dataset = InteriorNet(root_dir)
print(len(dataset))
from tqdm import tqdm
from torch.utils.data import DataLoader
import sys
test_loader = DataLoader(dataset, batch_size=4, shuffle=False)
for i, data in tqdm(enumerate(test_loader)):
if i == 0:
print(data[0].shape, data[1].shape, data[2].shape, data[3].shape, data[4].shape)
sys.exit()
| 2.359375 | 2 |
gui/arena/arena_editor.py | Flipajs/FERDA | 1 | 12794495 | <gh_stars>1-10
__author__ = 'dita'
from PyQt4 import QtGui, QtCore
import cv2
import sys
import math
import numpy as np
from core.project.project import Project
from gui.img_controls import gui_utils
from gui.arena.my_ellipse import MyEllipse
from gui.arena.my_view import MyView
class ArenaEditor(QtGui.QDialog):
DEBUG = True
def __init__(self, img, project, finish_callback=None):
# TODO: add support for the original arena editor (circle)
np.set_printoptions(threshold=np.nan)
super(ArenaEditor, self).__init__()
self.finish_callback = finish_callback
self.setMouseTracking(True)
self.background = img
self.project = project
self.view = MyView(update_callback_move=self.mouse_moving, update_callback_press=self.mouse_press_event)
self.scene = QtGui.QGraphicsScene()
self.view.setScene(self.scene)
# background image
self.scene.addPixmap(gui_utils.cvimg2qtpixmap(self.background))
self.view.setMouseTracking(True)
# store the current paint mode "polygons" or "paint" or "circles"
self.mode = ""
self.color = "Hiding\nplaces"
# store last 10 QImages to support the "undo" function
# undo button can only be pushed in paint mode, but affects polygon painting too (all polygons are saved
# as one step)
self.backup = []
##########################
# PAINT MODE VARIABLES #
##########################
self.pen_size = 30
# image to store all progress
bg_height, bg_width = self.background.shape[:2]
bg_size = QtCore.QSize(bg_width, bg_height)
fmt = QtGui.QImage.Format_ARGB32
self.paint_image = QtGui.QImage(bg_size, fmt)
self.paint_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.paint_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(self.paint_image))
self.save()
##########################
# POLYGON MODE VARIABLES #
##########################
# all independent points (they are not yet part of any polygon)
self.point_items = [] # type MyEllipse[]
# holds colors of polygons "Red" or "Blue"
self.polygon_colors = [] # type string[]
# holds sets of all used points. Each list corresponds to one polygon
self.polygon_points = [] # type MyEllipse[][]
# temporary image to work with polygons
self.poly_image = QtGui.QImage(bg_size, fmt)
self.poly_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.poly_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(self.poly_image))
##########################
# CIRCLES MODE VARIABLES #
##########################
# TODO: add circles mode variables
# temporary image to work with circles
self.circles_image = QtGui.QImage(bg_size, fmt)
self.circles_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.circles_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(self.circles_image))
# create the main view and left panel with buttons
self.make_gui()
def switch_color(self):
text = self.sender().text()
if self.DEBUG:
print(("Setting color to %s" % text))
# make sure no other button stays pushed
for button in self.color_buttons:
if button.text() != text:
button.setChecked(False)
else:
button.setChecked(True)
self.color = text
def switch_mode(self):
value = self.sender().text()
if value == "Paint mode":
# don't do anything if paint mode is already active
if self.mode == "paint":
return
self.set_paint_mode()
elif value == "Polygon mode":
# don't do anything if polygons mode is already active
if self.mode == "polygons":
return
self.set_polygons_mode()
else:
# don't do anything if polygons mode is already active
if self.mode == "circles":
return
self.set_circles_mode()
def set_paint_mode(self):
# join the temporary images with paint image
self.save()
self.refresh_image(self.merge_images())
# clean the temporary images
self.clear_poly_image()
self.clear_circles_image()
# clean after polygon drawing
# - remove all points and polygons
self.remove_items()
# - clear the memory
self.point_items = []
self.polygon_points = []
self.polygon_colors = []
# TODO: add cleanup after circle drawing
self.mode = "paint"
# adjust widgets displayed in the left panel
self.poly_button.setVisible(False)
self.undo_button.setVisible(True)
self.slider.setVisible(True)
for button in self.color_buttons:
button.setVisible(True)
self.clear_button.setVisible(True)
self.popup_button.setVisible(True)
self.pen_label.setVisible(True)
self.circle_label.setVisible(False)
self.set_label_text()
def set_polygons_mode(self):
self.mode = "polygons"
# TODO: add cleanup after circle drawing
# cleanup after paint mode is not needed
self.poly_button.setVisible(True)
self.undo_button.setVisible(False)
self.slider.setVisible(False)
for button in self.color_buttons:
button.setVisible(True)
# hide "Eraser" button, there are no erasers in polygons mode
self.color_buttons[2].setVisible(False)
# in case "Eraser" was chosen as a color, switch it to blue
if self.color == "Eraser":
self.color = "Hiding\nplaces"
self.color_buttons[0].setChecked(True)
self.color_buttons[2].setChecked(False)
self.clear_button.setVisible(True)
self.popup_button.setVisible(True)
self.pen_label.setVisible(False)
self.circle_label.setVisible(False)
def set_circles_mode(self):
# join the temporary images with paint image
self.save()
self.refresh_image(self.merge_images())
# clean the temporary images
self.clear_poly_image()
self.clear_circles_image()
# clean after polygon drawing
# - remove all points and polygons
self.remove_items()
# - clear the memory
self.point_items = []
self.polygon_points = []
self.polygon_colors = []
# cleanup after paint mode is not needed
self.mode = "circles"
self.poly_button.setVisible(False)
self.undo_button.setVisible(False)
self.slider.setVisible(False)
for button in self.color_buttons:
button.setVisible(False)
self.clear_button.setVisible(False)
self.popup_button.setVisible(False)
self.pen_label.setVisible(False)
self.circle_label.setVisible(True)
def popup(self):
"""
converts image to numpy arrays
:return: tuple (arena_mask, occultation_mask)
True in arena_masks means that the point is INSIDE the arena
True in occultation_mask means that the point is NOT a place to hide (it is visible)
"""
img = self.merge_images()
bg_height, bg_width = self.background.shape[:2]
ptr = img.constBits()
ptr.setsize(img.byteCount())
img_arr = np.array(ptr).reshape(bg_height, bg_width, 4)
# Color values in image are formatted [R, G, B, A].
# To extract mask data, only Red (0) and Blue (2) channels are needed.
# Create arena mask: 0 - outside, 255 - inside
# 1) load RED channel (red color shows where outside of the arena is -> everything not red is inside)
# TODO: For some reason, color channels are full of 0, so they can't be used to create a mask. Mysteriously,
# TODO: alpha channel seems to be working just fine. Arena mask is now working with alpha data, but
# TODO: occultation mask cannot be used at the same time. (they both share the same alpha channel)
arena_mask = np.array(img_arr[:,:,3], dtype="uint8")
np.set_printoptions(threshold=np.nan)
# 2) set all pixels that contain at least a little red to 1 (temporary)
arena_mask[arena_mask > 0] = 1
# 3) set all pixels with no color (arena inside) to 255
arena_mask[arena_mask == 0] = 255
# 4) set all red pixels (value 1) to 0
arena_mask[arena_mask == 1] = 0
# invert mask
arena_mask = np.invert(arena_mask)
"""
# Create occlusion mask: 0 - occultation, 255 - visible spot
# 1) load BLUE channel (blue color shows where occultation is -> everything not blue is visible or outside of the arena)
occultation_mask = np.array(img_arr[:,:,2], dtype="uint8")
# 2) set all pixels that contain at least a little blue to 1 (temporary)
occultation_mask[occultation_mask > 0] = 1
# 3) set all pixels with no color (arena inside) to 255
occultation_mask[occultation_mask == 0] = 255
# 4) set all blue pixels (value 1) to 0
occultation_mask[occultation_mask == 1] = 0
"""
if self.finish_callback:
# TODO: fix this so two different masks can be used
# self.finish_callback(arena_mask, occultation_mask)
self.finish_callback(arena_mask, None)
else:
# print label(arena_mask, connectivity=2)
# TODO: label seems to be working well, uncomment later
return arena_mask, None
def change_pen_size(self, value):
"""
change pen size
:param value: new pen size
:return: None
"""
# change pen size
self.pen_size = value
# refresh text in QLabel
self.set_label_text()
def set_label_text(self):
"""
changes the label to show current pen settings
:return: None
"""
if self.mode == "paint":
self.pen_label.setText("Pen size: %s" % self.pen_size)
def remove_items(self):
"""
remove all points from polygons mode from the scene
:return:
"""
# erase all points from polygons
for point_items in self.polygon_points:
for point in point_items:
self.scene.removeItem(point)
# erase all independent points
for point in self.point_items:
self.scene.removeItem(point)
def reset(self):
"""
clear everything and start over
:return: None
"""
self.remove_items()
self.clear_poly_image()
self.clear_paint_image()
self.point_items = []
self.polygon_points = []
self.polygon_colors = []
def mouse_press_event(self, event):
# get event position and calibrate to scene
pos = self.get_event_pos(event)
if not pos:
# if pos isn't in the scene
return
if self.mode == "polygons":
# in the polygons mode, try to pick one point
precision = 20
ok = True
for pt in self.point_items:
# check if the clicked pos isn't too close to any other already chosen point
dist = self.get_distance(pt, pos)
if dist < precision:
print("Too close")
ok = False
for points in self.polygon_points:
for pt in points:
dist = self.get_distance(pt, pos)
if dist < precision:
print("Too close2")
ok = False
if ok:
self.point_items.append(self.pick_point(pos))
elif self.mode == "paint":
# in the paint mode, paint the event position
self.save()
self.draw(pos)
def mouseReleaseEvent(self, event):
self.save()
def mouse_moving(self, event):
if self.mode == "paint":
point = self.get_event_pos(event)
if point:
# if point is in the scene
self.draw(point)
# do nothing in "polygons" mode
def get_event_pos(self, event):
point = self.view.mapToScene(event.pos()).toPoint()
if self.is_in_scene(point):
return point
else:
return False
def save(self):
"""
Saves current image temporarily (to use with "undo()" later)
:return:
"""
# save last 10 images
img = self.paint_image.copy()
self.backup.append(img)
if len(self.backup) > 10:
self.backup.pop(0)
def undo(self):
if self.mode == "paint":
lenght = len(self.backup)
if lenght > 0:
img = self.backup.pop(lenght-1)
self.refresh_image(img)
def refresh_image(self, img):
self.paint_image = img
self.scene.removeItem(self.paint_pixmap)
self.paint_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(img))
def refresh_poly_image(self):
self.scene.removeItem(self.poly_pixmap)
self.poly_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(self.poly_image))
def refresh_circles_image(self):
self.scene.removeItem(self.circles_pixmap)
self.circles_pixmap = self.scene.addPixmap(QtGui.QPixmap.fromImage(self.circles_image))
def clear_paint_image(self):
# remove all drawn lines
self.paint_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.refresh_image(self.paint_image)
def clear_poly_image(self):
# remove all drawn lines
self.poly_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.refresh_poly_image()
def clear_circles_image(self):
# remove all drawn lines
self.circles_image.fill(QtGui.qRgba(0, 0, 0, 0))
self.refresh_circles_image()
def draw(self, point):
"""
paint a point with a pen in paint mode
:param point: point to be drawn
:return: None
"""
# change float to int (QPointF -> QPoint)
if type(point) == QtCore.QPointF:
point = point.toPoint()
# use current pen color
if self.color == "Hiding\nplaces":
value = QtGui.qRgba(0, 0, 255, 100)
elif self.color == "Outside of\nthe arena":
value = QtGui.qRgba(255, 0, 0, 100)
else:
value = QtGui.qRgba(0, 0, 0, 0)
# paint the area around the point position
bg_height, bg_width = self.background.shape[:2]
        # integer division keeps the range endpoints ints (works on Python 2 and 3)
        for i in range(point.x() - self.pen_size//2, point.x() + self.pen_size//2):
            for j in range(point.y() - self.pen_size//2, point.y() + self.pen_size//2):
                if 0 <= i < bg_width and 0 < j <= bg_height:
try:
self.paint_image.setPixel(i, j, value)
except:
pass
# set new image and pixmap
self.refresh_image(self.paint_image)
def get_distance(self, pt_a, pt_b):
"""
        simple method that returns the distance between two points (A, B)
:param pt_a: Point A
:param pt_b: Point B
:return: float distance
"""
return math.sqrt((pt_b.x() - pt_a.x()) ** 2 + (pt_b.y() - pt_a.y()) ** 2)
def pick_point(self, position):
"""
create a point that the user has chosen to be a future part of a polygon
:param position:
:return: QGraphicsItem (from MyEllipse)
"""
# picks and marks a point in the polygon mode
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
ellipse = MyEllipse(update_callback=self.repaint_polygons)
ellipse.setBrush(brush)
ellipse.setPos(QtCore.QPoint(position.x(), position.y()))
self.scene.addItem(ellipse)
return ellipse
def paint_polygon(self):
"""
tries to create a new polygon from currently selected points (MyEllipses)
:return: bool, whether the polygon was drawn
"""
# check if polygon can be created
if len(self.point_items) > 2:
if self.DEBUG:
print("Polygon complete, drawing it")
# create the polygon
polygon = QtGui.QPolygonF()
for el in self.point_items:
# use all selected points
polygon.append(QtCore.QPointF(el.x(), el.y()))
            # draw the polygon and save its color
self.paint_polygon_(polygon, self.color)
self.polygon_colors.append(self.color)
# store all the points (ellipses), too
self.polygon_points.append(self.point_items)
# clear temporary points' storage
self.point_items = []
return True
else:
if self.DEBUG:
print("Polygon is too small, pick at least 3 points")
return False
def paint_polygon_(self, polygon, color):
"""
paints a polygon, when it's color and position are known
:param polygon: QPolygonF to be painted
:param color: "Red" or "Blue"
:return: None
"""
# setup the painter
painter = QtGui.QPainter()
painter.begin(self.poly_image)
brush = QtGui.QBrush()
# paint the polygon
if color == "Outside of\nthe arena":
qc = QtGui.QColor(255, 0, 0, 100)
else:
qc = QtGui.QColor(0, 0, 255, 100)
pen = QtGui.QPen(qc)
brush.setColor(qc)
brush.setStyle(QtCore.Qt.SolidPattern)
painter.setBrush(brush)
painter.setPen(pen)
painter.drawPolygon(polygon)
painter.end()
# refresh the image
self.refresh_poly_image()
def repaint_polygons(self):
"""
repaints all the polygons that are now changeable.
:return: None
"""
# clear the canvas
self.remove_items()
self.clear_poly_image()
tmp_ellipses = []
tmp_points = []
# go through all saved points and recreate the polygons according to the new points' position
i = 0
for points in self.polygon_points:
polygon = QtGui.QPolygonF()
tmp_ellipse = []
for point in points:
qpt = QtCore.QPointF(point.x(), point.y())
polygon.append(qpt)
tmp_ellipse.append(self.pick_point(qpt))
self.paint_polygon_(polygon, self.polygon_colors[i])
i += 1
tmp_ellipses.append(tmp_ellipse)
self.polygon_points = tmp_ellipses
for point in self.point_items:
pos = QtCore.QPoint(point.x(), point.y())
tmp_points.append(self.pick_point(pos))
self.point_items = tmp_points
def merge_images(self):
"""
merges the 3 images (paint, polygons and circles) into one result
:return: the final image
"""
bg_height, bg_width = self.background.shape[:2]
bg_size = QtCore.QSize(bg_width, bg_height)
fmt = QtGui.QImage.Format_ARGB32
result = QtGui.QImage(bg_size, fmt)
result.fill(QtGui.qRgba(0, 0, 0, 0))
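        # composite the three layers in draw order: polygons, then free-hand paint, then circles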
p = QtGui.QPainter()
p.begin(result)
p.drawImage(0, 0, self.poly_image)
p.drawImage(0, 0, self.paint_image)
p.drawImage(0, 0, self.circles_image)
p.end()
return result
def is_in_scene(self, point):
"""
checks if the point is inside the scene
        :param point: QPoint or QPointF
:return: True or False
"""
height, width = self.background.shape[:2]
if self.scene.itemsBoundingRect().contains(point) and point.x() <= width and point.y() <= height:
return True
else:
return False
def make_gui(self):
"""
Creates the widget. It is a separate method purely to save space
:return: None
"""
##########################
# GUI #
##########################
self.setLayout(QtGui.QHBoxLayout())
self.layout().setAlignment(QtCore.Qt.AlignBottom)
# left panel widget
widget = QtGui.QWidget()
widget.setLayout(QtGui.QVBoxLayout())
widget.layout().setAlignment(QtCore.Qt.AlignTop)
# set left panel widget width to 300px
widget.setMaximumWidth(300)
widget.setMinimumWidth(300)
label = QtGui.QLabel()
label.setWordWrap(True)
label.setText("Welcome to arena editor! Paint the outside of the arena with red and use blue to mark possible"
" hiding places. Unresolvable colors will be considered red.")
widget.layout().addWidget(label)
# SWITCH button and key shortcut
mode_switch_group = QtGui.QButtonGroup(widget)
polymode_button = QtGui.QRadioButton("Polygon mode")
mode_switch_group.addButton(polymode_button)
polymode_button.toggled.connect(self.switch_mode)
widget.layout().addWidget(polymode_button)
paintmode_button = QtGui.QRadioButton("Paint mode")
mode_switch_group.addButton(paintmode_button)
paintmode_button.toggled.connect(self.switch_mode)
widget.layout().addWidget(paintmode_button)
circlemode_button = QtGui.QRadioButton("Circles mode")
mode_switch_group.addButton(circlemode_button)
circlemode_button.toggled.connect(self.switch_mode)
widget.layout().addWidget(circlemode_button)
# color switcher widget
color_widget = QtGui.QWidget()
color_widget.setLayout(QtGui.QHBoxLayout())
self.color_buttons = []
blue_button = QtGui.QPushButton("Hiding\nplaces")
blue_button.setCheckable(True)
blue_button.setChecked(True)
blue_button.clicked.connect(self.switch_color)
color_widget.layout().addWidget(blue_button)
self.color_buttons.append(blue_button)
red_button = QtGui.QPushButton("Outside of\nthe arena")
red_button.setCheckable(True)
red_button.clicked.connect(self.switch_color)
color_widget.layout().addWidget(red_button)
self.color_buttons.append(red_button)
eraser_button = QtGui.QPushButton("Eraser")
eraser_button.setCheckable(True)
eraser_button.clicked.connect(self.switch_color)
color_widget.layout().addWidget(eraser_button)
self.color_buttons.append(eraser_button)
widget.layout().addWidget(color_widget)
self.pen_label = QtGui.QLabel()
self.pen_label.setWordWrap(True)
self.pen_label.setText("")
widget.layout().addWidget(self.pen_label)
self.circle_label = QtGui.QLabel()
self.circle_label.setWordWrap(True)
self.circle_label.setText("Sorry, not supported yet")
widget.layout().addWidget(self.circle_label)
# PEN SIZE slider
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.slider.setFocusPolicy(QtCore.Qt.NoFocus)
self.slider.setGeometry(30, 40, 50, 30)
self.slider.setRange(3, 50)
self.slider.setTickInterval(3)
self.slider.setValue(30)
self.slider.setTickPosition(QtGui.QSlider.TicksBelow)
self.slider.valueChanged[int].connect(self.change_pen_size)
self.slider.setVisible(False)
widget.layout().addWidget(self.slider)
# UNDO key shortcut
self.action_undo = QtGui.QAction('undo', self)
self.action_undo.triggered.connect(self.undo)
self.action_undo.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Z))
self.addAction(self.action_undo)
self.undo_button = QtGui.QPushButton("Undo\n(key Z)")
self.undo_button.clicked.connect(self.undo)
widget.layout().addWidget(self.undo_button)
# DRAW button and key shortcut
self.action_paint_polygon = QtGui.QAction('paint_polygon', self)
self.action_paint_polygon.triggered.connect(self.paint_polygon)
self.action_paint_polygon.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_D))
self.addAction(self.action_paint_polygon)
self.poly_button = QtGui.QPushButton("Draw polygons\n(key D)")
self.poly_button.clicked.connect(self.paint_polygon)
widget.layout().addWidget(self.poly_button)
# CLEAR button and key shortcut
self.action_clear = QtGui.QAction('clear', self)
self.action_clear.triggered.connect(self.reset)
self.action_clear.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_C))
self.addAction(self.action_clear)
self.clear_button = QtGui.QPushButton("Clear paint area\n(key C)")
self.clear_button.clicked.connect(self.reset)
widget.layout().addWidget(self.clear_button)
self.popup_button = QtGui.QPushButton("Done!")
self.popup_button.clicked.connect(self.popup)
widget.layout().addWidget(self.popup_button)
self.set_label_text()
paintmode_button.toggle()
# complete the gui
self.layout().addWidget(widget)
self.layout().addWidget(self.view)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
# im = cv2.imread('/home/dita/PycharmProjects/sample2.png')
im = cv2.imread('/Users/flipajs/Desktop/red_vid.png')
p = Project()
ex = ArenaEditor(im, p)
ex.show()
ex.move(-500, -500)
ex.showMaximized()
ex.setFocus()
app.exec_()
app.deleteLater()
sys.exit()
| 2.203125 | 2 |
tests/test_fitter.py | jmeyers314/danish | 0 | 12794496 | <reponame>jmeyers314/danish
import os
import pickle
import time
from scipy.optimize import least_squares
import numpy as np
import batoid
import danish
from test_helpers import timer
directory = os.path.dirname(__file__)
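# Obscuration geometry used by danish.DonutFactory: obsc_radii give the projected
# radius (in meters) of each obscuring element, and obsc_motion describes how that
# obscuration shifts with field angle (values presumably derived from the batoid
# telescope models).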
LSST_obsc_radii = {
'M1_inner': 2.5580033095346875,
'M2_outer': 4.502721059044802,
'M2_inner': 2.3698531889709487,
'M3_outer': 5.4353949343626216,
'M3_inner': 1.1919725733251365,
'L1_entrance': 7.692939426566589,
'L1_exit': 8.103064894823262,
'L2_entrance': 10.746925431763076,
'L2_exit': 11.548732622162085,
'Filter_entrance': 28.06952057721957,
'Filter_exit': 30.895257933242576,
'L3_entrance': 54.5631834759912,
'L3_exit': 114.76715786850136
}
LSST_obsc_motion = {
'M1_inner': 0.1517605552388959,
'M2_outer': 16.818667026561727,
'M2_inner': 16.818667026561727,
'M3_outer': 53.2113063872138,
'M3_inner': 53.2113063872138,
'L1_entrance': 131.69949884635324,
'L1_exit': 137.51151184228345,
'L2_entrance': 225.63931108752732,
'L2_exit': 236.8641351903567,
'Filter_entrance': 801.6598843836333,
'Filter_exit': 879.4647343264201,
'L3_entrance': 1594.7432961792515,
'L3_exit': 3328.637595923783
}
AuxTel_obsc_radii = {
'Baffle_M2c_inner': 0.2115
}
AuxTel_obsc_motion = {
'Baffle_M2c_inner': -2.7000030360993734
}
def plot_result(img, mod, z_fit, z_true, ylim=None):
jmax = len(z_fit)+4
import matplotlib.pyplot as plt
fig = plt.figure(constrained_layout=True, figsize=(10, 7))
gs = fig.add_gridspec(2, 3)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[0, 2])
ax3 = fig.add_subplot(gs[1, :])
ax0.imshow(img/np.sum(img))
ax1.imshow(mod/np.sum(mod))
ax2.imshow(img/np.sum(img) - mod/np.sum(mod))
ax3.axhline(0, c='k')
ax3.plot(np.arange(4, jmax), z_fit, c='b', label='fit')
ax3.plot(np.arange(4, jmax), z_true, c='k', label='truth')
ax3.plot(
np.arange(4, jmax),
(z_fit-z_true),
c='r', label='fit - truth'
)
if ylim is None:
ylim = -0.6, 0.6
ax3.set_ylim(*ylim)
ax3.set_xlabel("Zernike index")
ax3.set_ylabel("Residual (Waves)")
ax3.set_xticks(np.arange(4, jmax, dtype=int))
ax3.legend()
plt.show()
def plot_dz_results(imgs, mods, dz_fit, dz_true, dz_terms):
import matplotlib.pyplot as plt
fig = plt.figure(constrained_layout=True, figsize=(10, 12))
gs = fig.add_gridspec(len(imgs)+3, 3)
for i, (img, mod) in enumerate(zip(imgs, mods)):
ax0 = fig.add_subplot(gs[i, 0])
ax1 = fig.add_subplot(gs[i, 1])
ax2 = fig.add_subplot(gs[i, 2])
ax0.imshow(img/np.sum(img))
ax1.imshow(mod/np.sum(mod))
ax2.imshow(img/np.sum(img) - mod/np.sum(mod))
ax3 = fig.add_subplot(gs[-3:, :])
ax3.axhline(0, c='k', alpha=0.1)
ax3.plot(dz_fit, c='b', label='fit')
ax3.plot(dz_true, c='k', label='truth')
ax3.plot(
(dz_fit-dz_true),
c='r', label='fit - truth'
)
ax3.set_ylim(-0.6, 0.6)
ax3.set_xlabel("Double Zernike index")
ax3.set_ylabel("Residual (Waves)")
ax3.set_xticks(range(len(dz_terms)))
ax3.set_xticklabels(dz_terms)
ax3.legend()
plt.show()
@timer
def test_fitter_LSST_fiducial():
""" Roundtrip using danish model to produce a test image with fiducial LSST
transverse Zernikes plus random Zernike offsets. Model and fitter run
through the same code.
"""
telescope = batoid.Optic.fromYaml("LSST_i.yaml")
telescope = telescope.withGloballyShiftedOptic("Detector", [0, 0, 0.0015])
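    # despace the detector by 1.5 mm so stars appear as out-of-focus donuts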
wavelength = 750e-9
rng = np.random.default_rng(234)
if __name__ == "__main__":
niter = 10
else:
niter = 2
for _ in range(niter):
thr = np.sqrt(rng.uniform(0, 1.8**2))
ph = rng.uniform(0, 2*np.pi)
thx, thy = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
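        # (sqrt of a uniform draw in r^2 gives points uniformly distributed over the field disc)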
z_ref = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_ref *= wavelength
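        # zernikeTA returns coefficients in units of the wavelength (waves); convert to meters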
z_terms = np.arange(4, 23)
z_true = rng.uniform(-0.1, 0.1, size=19)*wavelength
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref, z_terms=z_terms, thx=thx, thy=thy
)
dx, dy = rng.uniform(-0.5, 0.5, size=2)
fwhm = rng.uniform(0.5, 1.5)
sky_level = 1000.0
img = fitter.model(
dx, dy, fwhm, z_true,
sky_level=sky_level, flux=5e6
)
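        # free parameters: [dx, dy, fwhm] followed by the 19 fitted Zernike coefficients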
guess = [0.0, 0.0, 0.7]+[0.0]*19
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
for i in range(4, 23):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(dx_fit, dx, rtol=0, atol=5e-2)
np.testing.assert_allclose(dy_fit, dy, rtol=0, atol=5e-2)
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=0.05*wavelength)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
assert rms < 0.1, "rms %9.3f > 0.1" % rms
@timer
def test_fitter_LSST_rigid_perturbation():
"""Roundtrip using danish model to produce a test image of rigid-body
perturbed LSST transverse Zernikes. Model and fitter run through the same
code.
"""
fiducial_telescope = batoid.Optic.fromYaml("LSST_i.yaml")
fiducial_telescope = fiducial_telescope.withGloballyShiftedOptic(
"Detector",
[0, 0, 0.0015]
)
wavelength = 750e-9
rng = np.random.default_rng(234)
if __name__ == "__main__":
niter = 10
else:
niter = 2
for _ in range(niter):
M2_dx, M2_dy = rng.uniform(-2e-4, 2e-4, size=2)
M2_dz = rng.uniform(-2e-5, 2e-5)
M2_thx, M2_thy = rng.uniform(-2e-5, 2e-5, size=2)
cam_dx, cam_dy = rng.uniform(-2e-3, 2e-3, size=2)
cam_dz = rng.uniform(-2e-5, 2e-5)
cam_thx, cam_thy = rng.uniform(-2e-4, 2e-4, size=2)
telescope = (
fiducial_telescope
.withGloballyShiftedOptic("M2", [M2_dx, M2_dy, M2_dz])
.withLocallyRotatedOptic(
"M2", batoid.RotX(M2_thx)@batoid.RotY(M2_thy)
)
.withGloballyShiftedOptic("LSSTCamera", [cam_dx, cam_dy, cam_dz])
.withLocallyRotatedOptic(
"LSSTCamera", batoid.RotX(cam_thx)@batoid.RotY(cam_thy)
)
)
thr = np.sqrt(rng.uniform(0, 1.8**2))
ph = rng.uniform(0, 2*np.pi)
thx, thy = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
z_ref = batoid.zernikeTA(
fiducial_telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_ref *= wavelength
z_perturb = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_perturb *= wavelength
z_terms = np.arange(4, 23)
z_true = (z_perturb - z_ref)[4:23]
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref, z_terms=z_terms, thx=thx, thy=thy
)
dx, dy = 0.0, 0.0
fwhm = 0.7
sky_level = 1000.0
img = fitter.model(
dx, dy, fwhm, z_true,
sky_level=sky_level, flux=5e6
)
guess = [0.0, 0.0, 0.7]+[0.0]*19
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
for i in range(4, 23):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(dx_fit, dx, rtol=0, atol=5e-2)
np.testing.assert_allclose(dy_fit, dy, rtol=0, atol=5e-2)
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=0.05*wavelength)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
assert rms < 0.1, "rms %9.3f > 0.1" % rms
@timer
def test_fitter_LSST_z_perturbation():
"""Roundtrip using danish model to produce a test image of rigid-body +
M1-surface-Zernike perturbed LSST transverse Zernikes. Model and fitter run
through the same code.
"""
fiducial_telescope = batoid.Optic.fromYaml("LSST_i.yaml")
fiducial_telescope = fiducial_telescope.withGloballyShiftedOptic(
"Detector",
[0, 0, 0.0015]
)
wavelength = 750e-9
rng = np.random.default_rng(234)
if __name__ == "__main__":
niter = 10
else:
niter = 2
for _ in range(niter):
M2_dx, M2_dy = rng.uniform(-2e-4, 2e-4, size=2)
M2_dz = rng.uniform(-2e-5, 2e-5)
M2_thx, M2_thy = rng.uniform(-2e-5, 2e-5, size=2)
cam_dx, cam_dy = rng.uniform(-2e-3, 2e-3, size=2)
cam_dz = rng.uniform(-2e-5, 2e-5)
cam_thx, cam_thy = rng.uniform(-2e-4, 2e-4, size=2)
telescope = (
fiducial_telescope
.withGloballyShiftedOptic("M2", [M2_dx, M2_dy, M2_dz])
.withLocallyRotatedOptic(
"M2", batoid.RotX(M2_thx)@batoid.RotY(M2_thy)
)
.withGloballyShiftedOptic("LSSTCamera", [cam_dx, cam_dy, cam_dz])
.withLocallyRotatedOptic(
"LSSTCamera", batoid.RotX(cam_thx)@batoid.RotY(cam_thy)
)
)
M1 = telescope['M1']
M1_a = np.zeros(23)
M1_a[12:23] = rng.uniform(-20e-9, 20e-9, 11)
telescope = telescope.withSurface(
"M1",
batoid.Sum([
M1.surface,
batoid.Zernike(
M1_a,
R_outer=M1.obscuration.original.outer,
R_inner=M1.obscuration.original.inner
)
])
)
thr = np.sqrt(rng.uniform(0, 1.8**2))
ph = rng.uniform(0, 2*np.pi)
thx, thy = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
z_ref = batoid.zernikeTA(
fiducial_telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_ref *= wavelength
z_perturb = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_perturb *= wavelength
z_terms = np.arange(4, 23)
z_true = (z_perturb - z_ref)[4:23]
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref, z_terms=z_terms, thx=thx, thy=thy
)
dx, dy = 0.0, 0.0
fwhm = 0.7
sky_level = 1000.0
img = fitter.model(
dx, dy, fwhm, z_true,
sky_level=sky_level, flux=5e6
)
guess = [0.0, 0.0, 0.7]+[0.0]*19
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
for i in range(4, 23):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(dx_fit, dx, rtol=0, atol=5e-2)
np.testing.assert_allclose(dy_fit, dy, rtol=0, atol=5e-2)
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=0.05*wavelength)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
        assert rms < 0.1, "rms %9.3f > 0.1" % rms
@timer
def test_fitter_LSST_kolm():
"""Roundtrip using GalSim Kolmogorov atmosphere + batoid to produce test
image of AOS DOF perturbed optics. Model and fitter run independent code.
"""
with open(
os.path.join(directory, "data", "test_kolm_donuts.pkl"),
'rb'
) as f:
data = pickle.load(f)
sky_level = data[0]['sky_level']
wavelength = data[0]['wavelength']
fwhm = data[0]['fwhm']
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
binned_factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=20e-6
)
if __name__ == "__main__":
niter = 10
else:
niter = 2
for datum in data[1:niter]:
thx = datum['thx']
thy = datum['thy']
z_ref = datum['z_ref']
z_actual = datum['z_actual']
img = datum['arr'][::-1, ::-1]
z_terms = np.arange(4, 23)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref*wavelength, z_terms=z_terms, thx=thx, thy=thy
)
guess = [0.0, 0.0, 0.7] + [0.0]*19
t0 = time.time()
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
t1 = time.time()
t1x1 = t1 - t0
z_true = (z_actual-z_ref)[4:23]*wavelength
for i in range(4, 23):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength)
        # One fit is problematic. It has a large field angle, so relax the
        # tolerance in that case.
if np.rad2deg(np.hypot(thx, thy)) > 1.7:
tol = 0.7
else:
tol = 0.25
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=tol*wavelength)
rms1x1 = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
        assert rms1x1 < 2*tol, "rms %9.3f > %9.3f" % (rms1x1, tol)
# Try binning 2x2
binned_fitter = danish.SingleDonutModel(
binned_factory, z_ref=z_ref*wavelength, z_terms=z_terms,
thx=thx, thy=thy, npix=89
)
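        # 2x2-bin the odd-sized image: crop to 180x180, average each 2x2 block,
        # then trim to 89x89 so it matches the binned model's npix=89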
binned_img = img[:-1,:-1].reshape(90,2,90,2).mean(-1).mean(1)[:-1,:-1]
t0 = time.time()
binned_result = least_squares(
binned_fitter.chi, guess, jac=binned_fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(binned_img, 4*sky_level)
)
t1 = time.time()
t2x2 = t1 - t0
dx_fit, dy_fit, fwhm_fit, *z_fit = binned_result.x
z_fit = np.array(z_fit)
# mod = binned_fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(binned_img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=tol*wavelength)
rms2x2 = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
        assert rms2x2 < 2*tol, "rms %9.3f > %9.3f" % (rms2x2, tol)
print("\n"*4)
print(f"1x1 fit time: {t1x1:.3f} sec")
print(f"2x2 fit time: {t2x2:.3f} sec")
print(f"1x1 rms: {rms1x1}")
print(f"2x2 rms: {rms2x2}")
print("\n"*4)
@timer
def test_fitter_LSST_atm():
"""Roundtrip using GalSim phase screen atmosphere + batoid to produce test
image of AOS DOF perturbed optics. Model and fitter run independent code.
"""
with open(
os.path.join(directory, "data", "test_atm_donuts.pkl"),
'rb'
) as f:
data = pickle.load(f)
wavelength = data[0]['wavelength']
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
binned_factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=20e-6
)
sky_level = data[0]['sky_level']
if __name__ == "__main__":
niter = 10
else:
niter = 2
for datum in data[1:niter]:
thx = datum['thx']
thy = datum['thy']
z_ref = datum['z_ref']
z_actual = datum['z_actual']
img = datum['arr'][::-1, ::-1]
z_terms = np.arange(4, 23)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref*wavelength, z_terms=z_terms, thx=thx, thy=thy
)
guess = [0.0, 0.0, 0.7] + [0.0]*19
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
z_true = (z_actual-z_ref)[4:23]*wavelength
for i in range(4, 23):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(
z_fit/wavelength, z_true/wavelength,
rtol=0, atol=0.5
)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.6, "rms %9.3f > 0.6" % rms
# Try binning 2x2
binned_fitter = danish.SingleDonutModel(
binned_factory, z_ref=z_ref*wavelength, z_terms=z_terms,
thx=thx, thy=thy, npix=89
)
binned_img = img[:-1,:-1].reshape(90,2,90,2).mean(-1).mean(1)[:-1,:-1]
t0 = time.time()
binned_result = least_squares(
binned_fitter.chi, guess, jac=binned_fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(binned_img, 4*sky_level)
)
t1 = time.time()
print(f"2x2 fit time: {t1-t0:.3f} sec")
dx_fit, dy_fit, fwhm_fit, *z_fit = binned_result.x
z_fit = np.array(z_fit)
# mod = binned_fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(binned_img, mod, z_fit/wavelength, z_true/wavelength)
np.testing.assert_allclose(
z_fit/wavelength,
z_true/wavelength,
rtol=0, atol=0.5
)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.6, "rms %9.3f > 0.6" % rms
@timer
def test_fitter_AuxTel_rigid_perturbation():
"""Roundtrip using danish model to produce a test image of rigid-body
perturbed AuxTel transverse Zernikes. Model and fitter run through the same
code.
"""
# Nominal donut mode for AuxTel is to despace M2 by 0.8 mm
fiducial_telescope = batoid.Optic.fromYaml("AuxTel.yaml")
fiducial_telescope = fiducial_telescope.withLocallyShiftedOptic(
"M2",
[0, 0, 0.0008]
)
wavelength = 750e-9
rng = np.random.default_rng(234)
if __name__ == "__main__":
niter = 10
else:
niter = 2
for _ in range(niter):
# Randomly perturb M2 alignment
M2_dx, M2_dy = rng.uniform(-3e-4, 3e-4, size=2)
M2_dz = rng.uniform(-3e-5, 3e-5)
M2_thx, M2_thy = rng.uniform(-3e-5, 3e-5, size=2)
telescope = (
fiducial_telescope
.withGloballyShiftedOptic("M2", [M2_dx, M2_dy, M2_dz])
.withLocallyRotatedOptic(
"M2", batoid.RotX(M2_thx)@batoid.RotY(M2_thy)
)
)
# Random point inside 0.05 degree radius field-of-view
thr = np.sqrt(rng.uniform(0, 0.05**2))
ph = rng.uniform(0, 2*np.pi)
thx, thy = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
# Determine reference "design" zernikes. Use the transverse aberration
# zernikes since danish uses a transverse aberration ray-hit model.
z_ref = batoid.zernikeTA(
fiducial_telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=11, eps=0.2115/0.6
)
z_ref *= wavelength
# The zernikes of the perturbed telescope. I.e., the "truth".
z_perturb = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=11, eps=0.2115/0.6
)
z_perturb *= wavelength
z_terms = np.arange(4, 12)
z_true = (z_perturb - z_ref)[4:12]
# NOTE: The R_inner and focal_length here don't quite match what I've
# seen elsewhere. Possible location for future improvement.
factory = danish.DonutFactory(
R_outer=0.6, R_inner=0.2115,
obsc_radii=AuxTel_obsc_radii, obsc_motion=AuxTel_obsc_motion,
focal_length=20.8, pixel_scale=10e-6
)
fitter = danish.SingleDonutModel(
factory, z_ref=z_ref, z_terms=z_terms, thx=thx, thy=thy, npix=255
)
dx, dy = 0.0, 0.0
fwhm = 0.7 # Arcsec for Kolmogorov profile
sky_level = 1000.0 # counts per pixel
# Make a test image using true aberrations
img = fitter.model(
dx, dy, fwhm, z_true,
sky_level=sky_level, flux=5e6
)
# Now guess aberrations are 0.0, and try to recover truth.
guess = [0.0, 0.0, 0.7]+[0.0]*8
# We don't ship a custom fitting algorithm; just use scipy.least_squares
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(img, sky_level)
)
for i in range(4, 12):
out = f"{i:2d} {result.x[i-1]/wavelength:9.3f}"
out += f" {z_true[i-4]/wavelength:9.3f}"
out += f" {(result.x[i-1]-z_true[i-4])/wavelength:9.3f}"
print(out)
dx_fit, dy_fit, fwhm_fit, *z_fit = result.x
z_fit = np.array(z_fit)
# Optional visualization
# mod = fitter.model(
# dx_fit, dy_fit, fwhm_fit, z_fit
# )
# plot_result(img, mod, z_fit/wavelength, z_true/wavelength, ylim=(-0.2, 0.2))
np.testing.assert_allclose(dx_fit, dx, rtol=0, atol=1e-2)
np.testing.assert_allclose(dy_fit, dy, rtol=0, atol=1e-2)
np.testing.assert_allclose(fwhm_fit, fwhm, rtol=0, atol=5e-2)
np.testing.assert_allclose(z_fit, z_true, rtol=0, atol=0.005*wavelength)
rms = np.sqrt(np.sum(((z_true-z_fit)/wavelength)**2))
assert rms < 0.1, "rms %9.3f > 0.1" % rms
@timer
def test_dz_fitter_LSST_fiducial():
""" Roundtrip using danish model to produce test images with fiducial LSST
transverse Zernikes plus random double Zernike offsets. Model and fitter
run through the same code.
"""
telescope = batoid.Optic.fromYaml("LSST_i.yaml")
telescope = telescope.withGloballyShiftedOptic("Detector", [0, 0, 0.0015])
wavelength = 750e-9
rng = np.random.default_rng(2344)
nstar = 10
if __name__ == "__main__":
niter = 10
else:
niter = 1
for _ in range(niter):
thr = np.sqrt(rng.uniform(0, 1.8**2, nstar))
ph = rng.uniform(0, 2*np.pi, nstar)
thxs, thys = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
z_refs = np.empty((nstar, 67))
for i, (thx, thy) in enumerate(zip(thxs, thys)):
z_refs[i] = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_refs *= wavelength
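        # double-Zernike terms to fit, as (field Zernike index, pupil Zernike index) pairs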
dz_terms = (
(1, 4), # defocus
(2, 4), (3, 4), # field tilt
(2, 5), (3, 5), (2, 6), (3, 6), # linear astigmatism
(1, 7), (1, 8) # constant coma
)
dz_true = rng.uniform(-0.3, 0.3, size=len(dz_terms))*wavelength
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
fitter = danish.MultiDonutModel(
factory,
z_refs=z_refs, dz_terms=dz_terms,
field_radius=np.deg2rad(1.8),
thxs=thxs, thys=thys
)
dxs = rng.uniform(-0.5, 0.5, nstar)
dys = rng.uniform(-0.5, 0.5, nstar)
fwhm = rng.uniform(0.5, 1.5)
sky_levels = [1000.0]*nstar
fluxes = [5e6]*nstar
imgs = fitter.model(
dxs, dys, fwhm, dz_true, sky_levels=sky_levels, fluxes=fluxes
)
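        # free parameters: per-donut dx offsets, per-donut dy offsets, one shared fwhm,
        # then the double-Zernike terms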
guess = [0.0]*nstar + [0.0]*nstar + [0.7] + [0.0]*len(dz_terms)
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(imgs, sky_levels)
)
dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
np.testing.assert_allclose(dxs, dxs_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(dys, dys_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(fwhm, fwhm_fit, rtol=0, atol=0.05)
np.testing.assert_allclose(
dz_fit/wavelength,
dz_true/wavelength,
rtol=0, atol=0.1
)
rms = np.sqrt(np.sum(((dz_true-dz_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.05, "rms %9.3f > 0.05" % rms
# dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
# mods = fitter.model(
# dxs_fit, dys_fit, fwhm_fit, dz_fit
# )
# plot_dz_results(
# imgs, mods, dz_fit/wavelength, dz_true/wavelength, dz_terms
# )
@timer
def test_dz_fitter_LSST_rigid_perturbation():
"""Roundtrip using danish model to produce a test images of rigid-body
perturbed LSST transverse Zernikes. Model and fitter run through the same
code.
"""
fiducial_telescope = batoid.Optic.fromYaml("LSST_i.yaml")
fiducial_telescope = fiducial_telescope.withGloballyShiftedOptic(
"Detector",
[0, 0, 0.0015]
)
wavelength = 750e-9
rng = np.random.default_rng(1234)
if __name__ == "__main__":
niter = 10
else:
niter = 1
for _ in range(niter):
M2_dx, M2_dy = rng.uniform(-2e-4, 2e-4, size=2)
M2_dz = rng.uniform(-2e-5, 2e-5)
M2_thx, M2_thy = rng.uniform(-2e-5, 2e-5, size=2)
cam_dx, cam_dy = rng.uniform(-2e-3, 2e-3, size=2)
cam_dz = rng.uniform(-2e-5, 2e-5)
cam_thx, cam_thy = rng.uniform(-2e-4, 2e-4, size=2)
telescope = (
fiducial_telescope
.withGloballyShiftedOptic("M2", [M2_dx, M2_dy, M2_dz])
.withLocallyRotatedOptic(
"M2", batoid.RotX(M2_thx)@batoid.RotY(M2_thy)
)
.withGloballyShiftedOptic("LSSTCamera", [cam_dx, cam_dy, cam_dz])
.withLocallyRotatedOptic(
"LSSTCamera", batoid.RotX(cam_thx)@batoid.RotY(cam_thy)
)
)
nstar = 10
thr = np.sqrt(rng.uniform(0, 1.8**2, nstar))
ph = rng.uniform(0, 2*np.pi, nstar)
thxs, thys = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
z_refs = np.empty((nstar, 67))
z_perturbs = np.empty((nstar, 67))
for i, (thx, thy) in enumerate(zip(thxs, thys)):
z_refs[i] = batoid.zernikeTA(
fiducial_telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_perturbs[i] = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_refs *= wavelength
z_perturbs *= wavelength
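        # double-Zernike expansion (field kmax=10, pupil jmax=66) of the wavefront over a
        # 1.8 deg field radius for the fiducial and perturbed telescopes; coefficients in waves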
dz_ref = batoid.analysis.doubleZernike(
fiducial_telescope, np.deg2rad(1.8), wavelength, rings=10,
kmax=10, jmax=66, eps=0.61
)
dz_perturb = batoid.analysis.doubleZernike(
telescope, np.deg2rad(1.8), wavelength, rings=10,
kmax=10, jmax=66, eps=0.61
)
dz_terms = (
(1, 4), # defocus
(2, 4), (3, 4), # field tilt
(2, 5), (3, 5), (2, 6), (3, 6), # linear astigmatism
(1, 7), (1, 8) # constant coma
)
dz_true = np.empty(len(dz_terms))
for i, term in enumerate(dz_terms):
dz_true[i] = (dz_perturb[term] - dz_ref[term])
dz_true *= wavelength
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
# Toy zfitter to make test images
fitter0 = danish.MultiDonutModel(
factory, z_refs=z_perturbs, dz_terms=(),
field_radius=np.deg2rad(1.8),
thxs=thxs, thys=thys
)
dxs = rng.uniform(-0.5, 0.5, nstar)
dys = rng.uniform(-0.5, 0.5, nstar)
fwhm = rng.uniform(0.5, 1.5)
sky_levels = [1000.0]*nstar
fluxes = [5e6]*nstar
imgs = fitter0.model(
dxs, dys, fwhm, (), sky_levels=sky_levels, fluxes=fluxes
)
# Actual fitter with DOF to optimize...
fitter = danish.MultiDonutModel(
factory, z_refs=z_refs, dz_terms=dz_terms,
field_radius=np.deg2rad(1.8),
thxs=thxs, thys=thys
)
guess = [0.0]*nstar + [0.0]*nstar + [0.7] + [0.0]*len(dz_terms)
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(imgs, sky_levels)
)
dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
np.testing.assert_allclose(dxs, dxs_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(dys, dys_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(fwhm, fwhm_fit, rtol=0, atol=0.05)
np.testing.assert_allclose(
dz_fit/wavelength,
dz_true/wavelength,
rtol=0, atol=0.1
)
rms = np.sqrt(np.sum(((dz_true-dz_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.1, "rms %9.3f > 0.1" % rms
# mods = fitter.model(
# dxs_fit, dys_fit, fwhm_fit, dz_fit
# )
# plot_dz_results(
# imgs, mods, dz_fit/wavelength, dz_true/wavelength, dz_terms
# )
@timer
def test_dz_fitter_LSST_z_perturbation():
"""Roundtrip using danish model to produce a test images of rigid-body
perturbed LSST transverse Zernikes. Model and fitter run through the same
code.
"""
fiducial_telescope = batoid.Optic.fromYaml("LSST_i.yaml")
fiducial_telescope = fiducial_telescope.withGloballyShiftedOptic(
"Detector",
[0, 0, 0.0015]
)
wavelength = 750e-9
rng = np.random.default_rng(124)
if __name__ == "__main__":
niter = 10
else:
niter = 1
for _ in range(niter):
M2_dx, M2_dy = rng.uniform(-2e-4, 2e-4, size=2)
M2_dz = rng.uniform(-2e-5, 2e-5)
M2_thx, M2_thy = rng.uniform(-2e-5, 2e-5, size=2)
cam_dx, cam_dy = rng.uniform(-2e-3, 2e-3, size=2)
cam_dz = rng.uniform(-2e-5, 2e-5)
cam_thx, cam_thy = rng.uniform(-2e-4, 2e-4, size=2)
telescope = (
fiducial_telescope
.withGloballyShiftedOptic("M2", [M2_dx, M2_dy, M2_dz])
.withLocallyRotatedOptic(
"M2", batoid.RotX(M2_thx)@batoid.RotY(M2_thy)
)
.withGloballyShiftedOptic("LSSTCamera", [cam_dx, cam_dy, cam_dz])
.withLocallyRotatedOptic(
"LSSTCamera", batoid.RotX(cam_thx)@batoid.RotY(cam_thy)
)
)
M1 = telescope['M1']
M1_a = np.zeros(23)
M1_a[9:16] = rng.uniform(-20e-9, 20e-9, 7)
telescope = telescope.withSurface(
"M1",
batoid.Sum([
M1.surface,
batoid.Zernike(
M1_a,
R_outer=M1.obscuration.original.outer,
R_inner=M1.obscuration.original.inner
)
])
)
nstar = 10
thr = np.sqrt(rng.uniform(0, 1.8**2, nstar))
ph = rng.uniform(0, 2*np.pi, nstar)
thxs, thys = np.deg2rad(thr*np.cos(ph)), np.deg2rad(thr*np.sin(ph))
z_refs = np.empty((nstar, 67))
z_perturbs = np.empty((nstar, 67))
for i, (thx, thy) in enumerate(zip(thxs, thys)):
z_refs[i] = batoid.zernikeTA(
fiducial_telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_perturbs[i] = batoid.zernikeTA(
telescope, thx, thy, wavelength,
nrad=20, naz=120, reference='chief',
jmax=66, eps=0.61
)
z_refs *= wavelength
z_perturbs *= wavelength
dz_ref = batoid.analysis.doubleZernike(
fiducial_telescope, np.deg2rad(1.8), wavelength, rings=10,
kmax=10, jmax=66, eps=0.61
)
dz_perturb = batoid.analysis.doubleZernike(
telescope, np.deg2rad(1.8), wavelength, rings=10,
kmax=10, jmax=66, eps=0.61
)
dz_terms = (
(1, 4), # defocus
(2, 4), (3, 4), # field tilt
(2, 5), (3, 5), (2, 6), (3, 6), # linear astigmatism
(1, 7), (1, 8), # constant coma
(1, 9), (1, 10), # constant trefoil
(1, 11), # constant spherical
(1, 12), (1, 13), # second astigmatism
(1, 14), (1, 15) # quatrefoil
)
dz_true = np.empty(len(dz_terms))
for i, term in enumerate(dz_terms):
dz_true[i] = (dz_perturb[term] - dz_ref[term])
dz_true *= wavelength
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
# Toy zfitter to make test images
fitter0 = danish.MultiDonutModel(
factory, z_refs=z_perturbs, dz_terms=(),
field_radius=np.deg2rad(1.8),
thxs=thxs, thys=thys
)
dxs = rng.uniform(-0.5, 0.5, nstar)
dys = rng.uniform(-0.5, 0.5, nstar)
fwhm = rng.uniform(0.5, 1.5)
sky_levels = [1000.0]*nstar
fluxes = [5e6]*nstar
imgs = fitter0.model(
dxs, dys, fwhm, (), sky_levels=sky_levels, fluxes=fluxes
)
# Actual fitter with DOF to optimize...
fitter = danish.MultiDonutModel(
factory, z_refs=z_refs, dz_terms=dz_terms,
field_radius=np.deg2rad(1.8),
thxs=thxs, thys=thys
)
guess = [0.0]*nstar + [0.0]*nstar + [0.7] + [0.0]*len(dz_terms)
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(imgs, sky_levels)
)
dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
np.testing.assert_allclose(dxs, dxs_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(dys, dys_fit, rtol=0, atol=0.2)
np.testing.assert_allclose(fwhm, fwhm_fit, rtol=0, atol=0.05)
np.testing.assert_allclose(
dz_fit/wavelength,
dz_true/wavelength,
rtol=0, atol=0.1
)
rms = np.sqrt(np.sum(((dz_true-dz_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.2, "rms %9.3f > 0.2" % rms
# mods = fitter.model(
# dxs_fit, dys_fit, fwhm_fit, dz_fit
# )
# plot_dz_results(
# imgs, mods, dz_fit/wavelength, dz_true/wavelength, dz_terms
# )
@timer
def test_dz_fitter_LSST_kolm():
with open(
os.path.join(directory, "data", "test_kolm_donuts.pkl"),
'rb'
) as f:
data = pickle.load(f)
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
sky_level = data[0]['sky_level']
wavelength = data[0]['wavelength']
dz_ref = data[0]['dz_ref']
dz_actual = data[0]['dz_actual']
thxs = []
thys = []
z_refs = []
z_actuals = []
imgs = []
for datum in data[1:]:
thxs.append(datum['thx'])
thys.append(datum['thy'])
z_refs.append(datum['z_ref'])
z_actuals.append(datum['z_actual'])
imgs.append(datum['arr'][::-1, ::-1])
dz_terms = (
(1, 4), # defocus
(2, 4), (3, 4), # field tilt
(2, 5), (3, 5), (2, 6), (3, 6), # linear astigmatism
(1, 7), (1, 8), # constant coma
(1, 9), (1, 10), # constant trefoil
(1, 11), # constant spherical
(1, 12), (1, 13), # second astigmatism
(1, 14), (1, 15), # quatrefoil
(1, 16), (1, 17),
(1, 18), (1, 19),
(1, 20), (1, 21),
(1, 22)
)
dz_true = np.empty(len(dz_terms))
for i, term in enumerate(dz_terms):
dz_true[i] = (dz_actual[term] - dz_ref[term])*wavelength
fitter = danish.MultiDonutModel(
factory, z_refs=np.array(z_refs)*wavelength, dz_terms=dz_terms,
field_radius=np.deg2rad(1.8), thxs=thxs, thys=thys
)
nstar = len(thxs)
guess = [0.0]*nstar + [0.0]*nstar + [0.7] + [0.0]*len(dz_terms)
sky_levels = [sky_level]*nstar
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(imgs, sky_levels)
)
dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
np.testing.assert_allclose(
dz_fit/wavelength,
dz_true/wavelength,
rtol=0, atol=0.1
)
rms = np.sqrt(np.sum(((dz_true-dz_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.2, "rms %9.3f > 0.2" % rms
# mods = fitter.model(
# dxs_fit, dys_fit, fwhm_fit, dz_fit
# )
# plot_dz_results(
# imgs, mods, dz_fit/wavelength, dz_true/wavelength, dz_terms
# )
@timer
def test_dz_fitter_LSST_atm():
with open(
os.path.join(directory, "data", "test_atm_donuts.pkl"),
'rb'
) as f:
data = pickle.load(f)
factory = danish.DonutFactory(
R_outer=4.18, R_inner=2.5498,
obsc_radii=LSST_obsc_radii, obsc_motion=LSST_obsc_motion,
focal_length=10.31, pixel_scale=10e-6
)
sky_level = data[0]['sky_level']
wavelength = data[0]['wavelength']
dz_ref = data[0]['dz_ref']
dz_actual = data[0]['dz_actual']
thxs = []
thys = []
z_refs = []
z_actuals = []
imgs = []
for datum in data[1:]:
thxs.append(datum['thx'])
thys.append(datum['thy'])
z_refs.append(datum['z_ref'])
z_actuals.append(datum['z_actual'])
imgs.append(datum['arr'][::-1, ::-1])
dz_terms = (
(1, 4), # defocus
(2, 4), (3, 4), # field tilt
(2, 5), (3, 5), (2, 6), (3, 6), # linear astigmatism
(1, 7), (1, 8), # constant coma
(1, 9), (1, 10), # constant trefoil
(1, 11), # constant spherical
(1, 12), (1, 13), # second astigmatism
(1, 14), (1, 15), # quatrefoil
(1, 16), (1, 17),
(1, 18), (1, 19),
(1, 20), (1, 21),
(1, 22)
)
dz_true = np.empty(len(dz_terms))
for i, term in enumerate(dz_terms):
dz_true[i] = (dz_actual[term] - dz_ref[term])*wavelength
fitter = danish.MultiDonutModel(
factory, z_refs=np.array(z_refs)*wavelength, dz_terms=dz_terms,
field_radius=np.deg2rad(1.8), thxs=thxs, thys=thys
)
nstar = len(thxs)
guess = [0.0]*nstar + [0.0]*nstar + [0.7] + [0.0]*len(dz_terms)
sky_levels = [sky_level]*nstar
result = least_squares(
fitter.chi, guess, jac=fitter.jac,
ftol=1e-3, xtol=1e-3, gtol=1e-3,
max_nfev=20, verbose=2,
args=(imgs, sky_levels)
)
dxs_fit, dys_fit, fwhm_fit, dz_fit = fitter.unpack_params(result.x)
np.testing.assert_allclose(
dz_fit/wavelength,
dz_true/wavelength,
rtol=0, atol=0.2
)
rms = np.sqrt(np.sum(((dz_true-dz_fit)/wavelength)**2))
print(f"rms = {rms:9.3f} waves")
assert rms < 0.4, "rms %9.3f > 0.4" % rms
# mods = fitter.model(
# dxs_fit, dys_fit, fwhm_fit, dz_fit
# )
# plot_dz_results(
# imgs, mods, dz_fit/wavelength, dz_true/wavelength, dz_terms
# )
if __name__ == "__main__":
test_fitter_LSST_fiducial()
test_fitter_LSST_rigid_perturbation()
test_fitter_LSST_z_perturbation()
test_fitter_LSST_kolm()
test_fitter_LSST_atm()
test_fitter_AuxTel_rigid_perturbation()
test_dz_fitter_LSST_fiducial()
test_dz_fitter_LSST_rigid_perturbation()
test_dz_fitter_LSST_z_perturbation()
test_dz_fitter_LSST_kolm()
test_dz_fitter_LSST_atm()
| 1.8125 | 2 |
scripts/update_songs_assets.py | theastropath/turbot | 10 | 12794497 | #!/usr/bin/env python3
from pathlib import Path
import requests
from bs4 import BeautifulSoup
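# Scrape the K.K. Slider song list from the Animal Crossing wiki and write the
# song names to src/turbot/assets/songs.csv (a single "name" column).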
page = requests.get(
"https://animalcrossing.fandom.com/wiki/K.K._Slider_song_list_(New_Horizons)"
)
tree = BeautifulSoup(page.content, "lxml")
with open(Path("src") / "turbot" / "assets" / "songs.csv", "w", newline="") as out:
def data_from(item):
title = item.select("a")[1]
return title.text
table_tag = tree.select("table")[1]
data = [
[data_from(item) for item in row_data.select("td")]
for row_data in table_tag.select("tr")
]
out.write("name\n")
for row in data:
for title in row:
out.write(f"{title}\n")
| 3.15625 | 3 |
konversi_suhu.py | salimsuprayogi/program_dasar_python | 1 | 12794498 | <gh_stars>1-10
import os
def main():
    # formula used: C = 5 * (F-32)/9
    # show program information
    print("Temperature conversion from Fahrenheit to Celsius\n")
    # read the temperature in Fahrenheit
    f = float(input("Enter temperature (Fahrenheit): "))
    # convert the temperature to Celsius
    c = 5 * (f-32)/9
    # display the conversion result
    print("Fahrenheit \t:", f)
    print("Celsius \t:", c, "\n")
os.system("pause")
if __name__ == "__main__":
main()
| 3.5625 | 4 |
tests/vcf_chunker_test.py | oxfordfun/minos | 14 | 12794499 | import filecmp
import shutil
import os
import unittest
import cluster_vcf_records
from minos import vcf_chunker
this_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(this_dir, "data", "vcf_chunker")
class TestVcfChunker(unittest.TestCase):
def test_total_variants_and_alleles_in_vcf_dict(self):
"""test _total_variants_and_alleles_in_vcf_dict"""
class FakeVcf:
def __init__(self, alt):
self.ALT = alt
test_dict = {
"chrom1": [FakeVcf("123"), FakeVcf("1"), FakeVcf("123456789")],
"chrom2": [FakeVcf("12"), FakeVcf("1234")],
}
expect_variants = 5
expect_alleles = 24
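        # each record counts as 1 variant and (1 ref + len(ALT)) alleles:
        # (4 + 2 + 10) + (3 + 5) = 24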
(
got_variants,
got_alleles,
) = vcf_chunker.VcfChunker._total_variants_and_alleles_in_vcf_dict(test_dict)
self.assertEqual(expect_variants, got_variants)
self.assertEqual(expect_alleles, got_alleles)
def test_chunk_end_indexes_from_vcf_record_list(self):
"""test _chunk_end_indexes_from_vcf_record_list"""
record_list = [
cluster_vcf_records.vcf_record.VcfRecord("ref\t1\t.\tA\tG\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord(
"ref\t2\t.\tC\tT,A,G,TA\t.\t.\t.\t."
),
cluster_vcf_records.vcf_record.VcfRecord("ref\t3\t.\tT\tA,C\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord(
"ref\t5\t.\tAGAGTCACGTA\tG\t.\t.\t.\t."
),
cluster_vcf_records.vcf_record.VcfRecord("ref\t18\t.\tA\tG\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord("ref\t21\t.\tG\tT\t.\t.\t.\t."),
]
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=1
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=2
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=3
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=4
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=5
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=6
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=7
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=8
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=9
),
)
self.assertEqual(
(0, 2, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=10
),
)
self.assertEqual(
(0, 2, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=11
),
)
self.assertEqual(
(0, 3, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_alleles=12
),
)
self.assertEqual(
(0, 0, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=1
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=2
),
)
self.assertEqual(
(0, 2, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=3
),
)
self.assertEqual(
(0, 3, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=4
),
)
self.assertEqual(
(0, 4, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=5
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=6
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=7
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 1, total_sites=8
),
)
self.assertEqual(
(0, 0, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=1
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=2
),
)
self.assertEqual(
(0, 2, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=3
),
)
self.assertEqual(
(0, 3, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=4
),
)
self.assertEqual(
(0, 4, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=5
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=6
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=7
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 2, total_sites=8
),
)
self.assertEqual(
(0, 0, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=1
),
)
self.assertEqual(
(0, 1, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=2
),
)
self.assertEqual(
(0, 2, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=3
),
)
self.assertEqual(
(0, 3, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=4
),
)
self.assertEqual(
(0, 4, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=5
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=6
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=7
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 3, total_sites=8
),
)
self.assertEqual(
(0, 0, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=1
),
)
self.assertEqual(
(0, 1, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=2
),
)
self.assertEqual(
(0, 2, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=3
),
)
self.assertEqual(
(0, 3, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=4
),
)
self.assertEqual(
(0, 4, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=5
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=6
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 4, total_sites=7
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 1, total_sites=1
),
)
self.assertEqual(
(0, 1, 2),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 2, total_sites=1
),
)
self.assertEqual(
(0, 1, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 3, total_sites=1
),
)
self.assertEqual(
(0, 1, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 15, total_sites=1
),
)
self.assertEqual(
(0, 1, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 16, total_sites=1
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 1, 1, total_sites=6
),
)
self.assertEqual(
(4, 4, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 4, 1, total_sites=1
),
)
self.assertEqual(
(4, 4, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 4, 2, total_sites=1
),
)
self.assertEqual(
(3, 4, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 4, 3, total_sites=1
),
)
self.assertEqual(
(4, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 4, 1, total_sites=2
),
)
self.assertEqual(
(5, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 1, total_sites=1
),
)
self.assertEqual(
(5, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 1, total_sites=2
),
)
self.assertEqual(
(5, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 2, total_sites=2
),
)
self.assertEqual(
(4, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 3, total_sites=2
),
)
self.assertEqual(
(4, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 4, total_sites=2
),
)
self.assertEqual(
(4, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 5, total_sites=2
),
)
self.assertEqual(
(3, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 6, total_sites=2
),
)
self.assertEqual(
(3, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 7, total_sites=2
),
)
self.assertEqual(
(3, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 17, total_sites=2
),
)
self.assertEqual(
(2, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 18, total_sites=2
),
)
self.assertEqual(
(1, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 19, total_sites=2
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 20, total_sites=2
),
)
self.assertEqual(
(0, 5, 5),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 5, 21, total_sites=2
),
)
        # These records caused a minos error because the variant at 800 was
        # included in the last split file, but the use_end_index was at the
        # position of the variant at 610, so the variant at 800 was never used.
record_list = [
cluster_vcf_records.vcf_record.VcfRecord("ref\t75\t.\tA\tG\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord("ref\t150\t.\tG\tA,T\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord("ref\t450\t.\tT\tC\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord("ref\t610\t.\tA\tG\t.\t.\t.\t."),
cluster_vcf_records.vcf_record.VcfRecord("ref\t800\t.\tC\tCA\t.\t.\t.\t."),
]
self.assertEqual(
(0, 1, 1),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 0, 100, total_sites=2
),
)
self.assertEqual(
(2, 3, 3),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 2, 100, total_sites=2
),
)
self.assertEqual(
(4, 4, 4),
vcf_chunker.VcfChunker._chunk_end_indexes_from_vcf_record_list(
record_list, 4, 100, total_sites=2
),
)
def test_make_split_files(self):
"""test make_split_files"""
infile = os.path.join(data_dir, "make_split_files.in.vcf")
tmp_out = "tmp.vcf_chunker.make_split_files"
ref_fa = os.path.join(data_dir, "make_split_files.in.ref.fa")
if os.path.exists(tmp_out):
shutil.rmtree(tmp_out)
vcf1 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t1\t.\tG\tT\t.\tPASS\t.\t.\t."
)
vcf2 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t2\t.\tC\tT\t.\tPASS\t.\t.\t."
)
vcf3 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t3\t.\tT\tA\t.\tPASS\t.\t.\t."
)
vcf4 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t5\t.\tAGAGTCACGTA\tG\t.\tPASS\t.\t.\t."
)
vcf5 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t18\t.\tA\tG\t.\tPASS\t.\t.\t."
)
vcf6 = cluster_vcf_records.vcf_record.VcfRecord(
"ref1\t21\t.\tG\tT\t.\tPASS\t.\t.\t."
)
vcf7 = cluster_vcf_records.vcf_record.VcfRecord(
"ref2\t42\t.\tC\tG\t.\tPASS\t.\t.\t."
)
header_lines = [
"##header1",
"##header2",
"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample_name",
]
chunker = vcf_chunker.VcfChunker(
tmp_out,
vcf_infile=infile,
ref_fasta=ref_fa,
variants_per_split=2,
flank_length=1,
gramtools_kmer_size=5,
)
chunker.make_split_files()
self.assertTrue(os.path.exists(chunker.metadata_pickle))
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.0.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf1, vcf2, vcf3], got_records)
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.1.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf2, vcf3, vcf4], got_records)
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.2.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf5, vcf6], got_records)
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.3.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf7], got_records)
self.assertFalse(os.path.exists(os.path.join(tmp_out, "split.4.in.vcf")))
shutil.rmtree(tmp_out)
chunker = vcf_chunker.VcfChunker(
tmp_out,
vcf_infile=infile,
ref_fasta=ref_fa,
variants_per_split=4,
flank_length=3,
gramtools_kmer_size=5,
)
chunker.make_split_files()
self.assertTrue(os.path.exists(chunker.metadata_pickle))
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.0.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf1, vcf2, vcf3, vcf4, vcf5], got_records)
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.1.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf4, vcf5, vcf6], got_records)
got_header, got_records = cluster_vcf_records.vcf_file_read.vcf_file_to_list(
os.path.join(tmp_out, "split.2.in.vcf")
)
self.assertEqual(header_lines, got_header)
self.assertEqual([vcf7], got_records)
self.assertFalse(os.path.exists(os.path.join(tmp_out, "split.3.in.vcf")))
chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5)
self.assertEqual(chunker.vcf_infile, chunker2.vcf_infile)
self.assertEqual(chunker.ref_fasta, chunker2.ref_fasta)
self.assertEqual(chunker.variants_per_split, chunker2.variants_per_split)
self.assertEqual(chunker.total_splits, chunker2.total_splits)
self.assertEqual(chunker.flank_length, chunker2.flank_length)
self.assertEqual(chunker.gramtools_kmer_size, chunker2.gramtools_kmer_size)
self.assertEqual(chunker.total_split_files, chunker2.total_split_files)
self.assertEqual(chunker.vcf_split_files, chunker2.vcf_split_files)
shutil.rmtree(tmp_out)
def test_make_split_files_2(self):
"""test make_split_files with different input from previous test"""
# These records cause a minos bug. Last record was not being used
# when merging because the index was wrong.
# They are test data from multi_sample_pipeline tests
infile = os.path.join(data_dir, "make_split_files2.in.vcf")
tmp_out = "tmp.vcf_chunker.make_split_files2"
ref_fa = os.path.join(data_dir, "make_split_files2.in.ref.fa")
if os.path.exists(tmp_out):
shutil.rmtree(tmp_out)
chunker = vcf_chunker.VcfChunker(
tmp_out,
vcf_infile=infile,
ref_fasta=ref_fa,
variants_per_split=2,
flank_length=200,
gramtools_kmer_size=5,
)
chunker.make_split_files()
self.assertTrue(os.path.exists(chunker.metadata_pickle))
chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5)
self.assertEqual(1, len(chunker2.vcf_split_files))
self.assertEqual(3, len(chunker2.vcf_split_files["ref.0"]))
self.assertEqual(4, chunker2.vcf_split_files["ref.0"][-1].use_end_index)
shutil.rmtree(tmp_out)
# Test with two threads
chunker = vcf_chunker.VcfChunker(
tmp_out,
vcf_infile=infile,
ref_fasta=ref_fa,
variants_per_split=2,
flank_length=200,
threads=2,
gramtools_kmer_size=5,
)
chunker.make_split_files()
self.assertTrue(os.path.exists(chunker.metadata_pickle))
chunker2 = vcf_chunker.VcfChunker(tmp_out, gramtools_kmer_size=5)
self.assertEqual(1, len(chunker2.vcf_split_files))
self.assertEqual(3, len(chunker2.vcf_split_files["ref.0"]))
self.assertEqual(4, chunker2.vcf_split_files["ref.0"][-1].use_end_index)
shutil.rmtree(tmp_out)
def test_merge_files(self):
"""test merge_files"""
vcf_to_split = os.path.join(data_dir, "merge_files.in.vcf")
ref_fasta = os.path.join(data_dir, "merge_files.in.ref.fa")
tmp_outdir = "tmp.vcf_chunker.merge_files"
chunker = vcf_chunker.VcfChunker(
tmp_outdir,
vcf_infile=vcf_to_split,
ref_fasta=ref_fasta,
variants_per_split=4,
flank_length=3,
gramtools_kmer_size=5,
)
chunker.make_split_files()
to_merge = {}
for ref, split_list in chunker.vcf_split_files.items():
to_merge[ref] = [x.filename for x in split_list]
tmp_vcf_out = "tmp.vcf_chunker.merge_files.out.vcf"
chunker.merge_files(to_merge, tmp_vcf_out)
self.assertTrue(filecmp.cmp(vcf_to_split, tmp_vcf_out, shallow=False))
os.unlink(tmp_vcf_out)
shutil.rmtree(tmp_outdir)
| 2.359375 | 2 |
reth/reth/algorithm/dqn/__init__.py | sosp2021/Reth | 0 | 12794500 | from .dqn_solver import DQNSolver
| 1.039063 | 1 |
labs/03_neural_recsys/movielens_paramsearch_results.py | soufiomario/labs-Deep-learning | 1,398 | 12794501 | <filename>labs/03_neural_recsys/movielens_paramsearch_results.py
import pandas as pd
from pathlib import Path
import json
def load_results_df(folder='results'):
folder = Path(folder)
results_dicts = []
for p in sorted(folder.glob('**/results.json')):
with p.open('r') as f:
results_dicts.append(json.load(f))
return pd.DataFrame.from_dict(results_dicts)
if __name__ == "__main__":
df = load_results_df().sort_values(by=['test_mae'], ascending=True)
print(df.head(5))
| 2.90625 | 3 |
src/KnotTheory/HFK-Zurich/simplify/fastUnknot2.py | craigfreilly/masters-project-submission | 0 | 12794502 | <filename>src/KnotTheory/HFK-Zurich/simplify/fastUnknot2.py
import RectDia
import FastRectDiag
from RectDia import RectDia
from FastRectDiag import FastRectDiag
import profile
def printHistory(diag):
tmp=diag
while(1):
## if tmp.isdestabilisable()!=0 :print tmp.isdestabilisable()
## print tmp.xSorted
## print tmp.ySorted
print tmp.toRectDia().toStringNice()
if tmp.predecessor==0:break
tmp=tmp.predecessor
def unknot(diag,bound):
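    # Search over diagram moves: whenever a destabilisation becomes possible the
    # complexity drops and the search restarts from the reduced diagram; `bound`
    # caps how many diagrams are expanded before giving up.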
diag=FastRectDiag(diag)
print diag.xSorted
print diag.ySorted
counter=0
n=diag.complexity
stack=[diag]
hmap=dict({diag.hashInt():0})
while(counter<bound):
if len(stack)==0:
## printHistory(diag)
return ("not an unknot",diag)
else: diag=stack.pop()
## diag.draw()##debug!!
if diag.complexity<3:
## printHistory(diag)
return ("not knotted",diag)
if diag.isdestabilisable():
print "reduction!"+str(len(stack))
counter=0
## diag.draw()##debug!!
## print diag.xSorted
## print diag.ySorted
des=diag.isdestabilisable()
tmp=diag.copy()
tmp.m_destabilisation(des[0],des[1])
tmp.predecessor=diag
diag=tmp
## print len(diag.points)
## print diag.xSorted
## print diag.ySorted
stack=[diag]
## stack[0].draw()##debug!!
hmap=dict({diag.hashInt():0})
continue
succ=diag.fastsuccCa(hmap)
## print len(diag.succCa())
for k in succ:
## if not hmap.has_key(k.hashInt()):
k.predecessor=diag
tmp=k.isdestabilisableAdvanced()
if tmp!=0:
stack.append(tmp)
break
if k.isdestabilisable():
stack.append(k)
break
else:
stack=[k]+stack
hmap[k.hashInt()]=0
counter+=1
if counter%5000==0:
print "Please wait!"+str((counter,len(hmap)))
## printHistory(diag)
return ("unknown",diag)
if __name__ == "__main__":
## dd=RectDia([(0,0),(0,4),(1,2),(1,8),(2,7),(2,9),(3,6),(3,8),(4,1),(4,3),(5,2),(5,7),(6,0),(6,3),(7,1),(7,5),(8,4),(8,6),(9,5),(9,9)])
dd=RectDia([(0,23),(0,6),(1,21),(1,7),(2,19),(2,11),(3,0),(3,4),(4,5),(4,18),(5,3),(5,1),(6,7),(6,2),(7,16),(7,8),(8,11),(8,6),(9,13),(9,5),(10,12),(10,4),(11,9),(11,3),(12,15),(12,8),(13,21),(13,13),(14,10),(14,1),(15,15),(15,9),(16,17),(16,14),(17,16),(17,12),(18,18),(18,10),(19,14),(19,0),(20,20),(20,17),(21,22),(21,19),(22,23),(22,20),(23,22),(23,2)])
## dd.draw()
import profile
profile.run("print unknot(dd,10000)")
## print unknot(dd)
| 2.796875 | 3 |
src/compiler/build.py | glgomes/clonebuilder | 1 | 12794503 | # -*- coding: UTF-8 -*-
"""
@author rpereira
Apr 10, 2012
Build control, extracted from legacy functions.
"""
import legacy.makefile as mkfile
from legacy.compiler_builder import CompilerBuilder
import legacy.futil as futil
import wx
import string
import re
from controller.front import FrontController
from legacy.file import BuilderFile
import legacy.fileres as fileres
import legacy.makefile as mkfile
import legacy.altver as altver
import multiprocessing
import sys
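# Classification codes for compiler output lines (see Ctrl's __check_msg).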
class Msg:
SUCCESS = 0
WARNING = 1
ERROR = 2
EOF = 3
class Ctrl(object):
"""
@author rpereira
    Build control class.
"""
#Borg Design Pattern
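    # Every instance shares the same __dict__ (assigned in __init__ below),
    # so all Ctrl objects see the same state without enforcing a single instance.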
__shared_state = {}
def __init__(self):
self.__threads_spin = None
self.__console_panel = None
self.__list_ctrl = None
self.__dict__ = self.__shared_state
self.cancel = False
self.cmd = False
self.__error = False
def set_cmd(self):
self.cmd = True
def set_console_panel(self, console_panel):
self.__console_panel = console_panel
self.__list_ctrl = console_panel.list_ctrl
def make(self, path, file, paramExtr = ""):
"""
        General build/make of the given makefile
        @param path path of the makefile
        @param additional parameter appended to the build/make command
"""
fctrl = FrontController()
self.__error = False
        #TODO: remove these "return 100" (replace with an error message)
if not self.altera_versao(fctrl.get_version(), path):
return 100
        #TODO: remove these "return 100"
if not self.update_res(path, file):
return 100
num_threads = fctrl.get_num_threads()
try:
if num_threads == 0:
num_threads = multiprocessing.cpu_count()
except Exception:
num_threads = 1
mkfile.gera_make(path, file, num_threads)
self.__log("Makefile: " + file + " atualizado")
#TODO: extract .mak extension
self.job = CompilerBuilder(
path,
file,
num_threads > 1)
self.job.compile(paramExtr)
out = " "
self.__log("make " + file)
        #TODO: remove the "while True"
while True:
out = self.job.readMsg()
if out == "":
self.__log("FIM: " + file)
return self.job.close()
self.__log(out)
if self.cancel == True or self.__error == True:
self.job.close()
self.__log("Cancelado: " + file)
self.cancel = False
return 100
self.job = None
return 0
def altera_versao(self, version, path):
"""
        Edits the BPK/BPR, changing the version
"""
success = True
out = ""
try:
change_version = altver.AlteraVer(version, path)
out = change_version.run()
self.__log(out)
except Exception as e:
print e
success = False
raise e
return success
def update_res(self, path, file):
"""
        Edits and generates the project's RES file
"""
success = True
builder = None
try:
builder = BuilderFile(path + "\\" + file)
builder.open()
versao = builder.getInfo("FileVersion")
fileDescription = builder.getInfo("FileDescription")
productName = builder.getInfo("ProductName")
dataRes = fileres.DataRes();
dataRes.fileVersion = versao
dataRes.fileDescription = fileDescription
dataRes.productName = productName
fileProject = mkfile.find_type_project_bin(path, file)
dataRes.internalName = fileProject
dataRes.originalFileName = fileProject
self.generate_res_file(path, file, dataRes)
except:
self.__log("Nao foi possivel gerar RES para " + file)
success = False
finally:
if builder <> None:
builder.close()
return success
def generate_res_file(self, path, file, dataRes):
"""
        Generates the project's RES file
"""
compRes = fileres.FileRes()
compRes.CriaRes(path, file, dataRes)
fileRes = futil.change_ext(file, 'res')
self.__log("Arquivo RES: " + fileRes + " gerado")
def __check_msg(self, linha):
ret = Msg.SUCCESS
if ((re.match("[E|e]rror", linha) or re.match(".*[E|e]rror [E]\d\d\d\d", linha))and
not (string.find(linha, "Error messages:") == 0 and
string.find(linha, "None") != -1)):
ret = Msg.ERROR
elif (re.match("[F|f]atal", linha) and
not (string.find(linha, "None") != -1)):
ret = Msg.ERROR
elif (re.match("FIM:", linha) and
not (string.find(linha, "None") != -1)):
ret = Msg.EOF
elif (re.match(".*[W|w]arning [W]\d\d\d\d", linha)and
not (string.find(linha, "Warning messages:") == 0 and
string.find(linha, "None") != -1)):
ret = Msg.WARNING
return ret
def __log(self, text):
"""
        Prints text to the configured output
        @param text data to be printed
"""
listVal = futil.trata_texto(text)
for linha in listVal:
msg_ret = self.__check_msg(linha)
if self.cmd:
if (msg_ret == Msg.ERROR):
self.__error = True
print linha
else:
index = self.__list_ctrl.GetItemCount()
self.__list_ctrl.InsertStringItem(index, linha)
self.__list_ctrl.EnsureVisible(self.__list_ctrl.GetItemCount()-1)
color = wx.WHITE
if (msg_ret == Msg.ERROR):
color = wx.RED
self.__error = True
elif (msg_ret == Msg.WARNING):
color = wx.NamedColour("yellow")
elif (msg_ret == Msg.EOF):
color = wx.GREEN
self.__list_ctrl.SetItemBackgroundColour(index, color)
self.__list_ctrl.SetColumnWidth(0, wx.LIST_AUTOSIZE)
| 2.203125 | 2 |
radiopadre_kernel/js9/__init__.py | ratt-ru/radiopadre | 9 | 12794504 | <gh_stars>1-10
import os, os.path, traceback
from iglesia.utils import message, error
# init JS9 configuration
# js9 source directory
DIRNAME = os.path.dirname(__file__)
JS9_DIR = None
JS9_ERROR = os.environ.get("RADIOPADRE_JS9_ERROR") or None
JS9_HELPER_PORT = None
# Javascript code read from local settings file
JS9_LOCAL_SETTINGS = None
JS9_INIT_HTML_STATIC = JS9_INIT_HTML_DYNAMIC = ""
class JS9Error(Exception):
def __init__(self, message=None):
self.message = message
def preinit_js9():
"""Pre-initialization, when Javascript is not available yet. Determines paths and starts helper processs"""
global radiopadre_kernel
import radiopadre_kernel
import iglesia
global JS9_HELPER_PORT, JS9_DIR
JS9_DIR = iglesia.JS9_DIR
JS9_HELPER_PORT = iglesia.JS9HELPER_PORT
try:
global JS9_ERROR
if not os.path.exists(JS9_DIR):
raise JS9Error(f"{JS9_DIR} does not exist")
message(f"Using JS9 install in {JS9_DIR}")
global RADIOPADRE_INSTALL_PREFIX
global RADIOPADRE_LOCAL_PREFIX
global JS9_INSTALL_PREFIX
global JS9_INIT_HTML_STATIC
global JS9_INIT_HTML_DYNAMIC
global JS9_SCRIPT_PREFIX
global JS9_LOCAL_SETTINGS
RADIOPADRE_INSTALL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/radiopadre-www" # URL used to access radiopadre code
RADIOPADRE_LOCAL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/{radiopadre_kernel.ABSROOTDIR}/.radiopadre" # URL used to access radiopadre aux dir
JS9_INSTALL_PREFIX = f"{radiopadre_kernel.SHADOW_URL_PREFIX}/js9-www" # URL used to access JS9 code
JS9_SCRIPT_PREFIX = radiopadre_kernel.SHADOW_URL_PREFIX
JS9_LOCAL_SETTINGS = f"{radiopadre_kernel.SESSION_URL}/js9prefs.js"
# load templated init HTML
try:
with open(os.path.join(DIRNAME, "js9-init-static-template.html"), "rt") as inp:
JS9_INIT_HTML_STATIC = inp.read().format(**globals())
with open(os.path.join(DIRNAME, "js9-init-dynamic-template.html"), "rt") as inp:
JS9_INIT_HTML_DYNAMIC = inp.read().format(**globals())
except Exception as exc:
traceback.print_exc()
JS9_ERROR = "Error reading init templates: {}".format(str(exc))
except JS9Error as exc:
if exc.message:
JS9_ERROR = exc.message
# on error, init code replaced by error message
if JS9_ERROR:
error(f"JS9 init error: {JS9_ERROR}")
# def init_js9():
# """Final initialization, when Javascript can be injected"""
# from IPython.display import Javascript, display
# display(Javascript("""
# <link type='image/x-icon' rel='shortcut icon' href='/static/js9-www/favicon.ico'>
# <link type='text/css' rel='stylesheet' href='/static/js9-www/js9support.css'>
# <link type='text/css' rel='stylesheet' href='/static/js9-www/js9.css'>
# <link rel='apple-touch-icon' href='/static/js9-www/images/js9-apple-touch-icon.png'>
# <script type='text/javascript' src='/static/js9-www/js9prefs.js'></script>
# <script type='text/javascript'> console.log('loaded JS9 prefs 1') </script>
# <script type='text/javascript' src='/files/.radiopadre-session/js9prefs.js'></script>
# <script type='text/javascript'> console.log('loaded JS9 prefs 2')</script>
# <script type='text/javascript' src='/static/js9-www/js9support.min.js'></script>
# <script type='text/javascript' src='/static/js9-www/js9.min.js'></script>
# <script type='text/javascript' src='/static/js9-www/js9plugins.js'></script>
# <script type='text/javascript'> console.log('loaded JS9 components') </script>
# <script type='text/javascript' src='/static/radiopadre-www/js9partners.js'></script>
# <script type='text/javascript'> console.log('loaded JS9 partner plugin') </script>
# <script type='text/javascript' src='/static/js9colormaps.js'></script>\
# """),) | 2.171875 | 2 |
setup.py | Ravindrasaragadam/Tweeter-Sentiment-Analysis | 0 | 12794505 | <gh_stars>0
from cx_Freeze import setup, Executable
base = None
executables = [Executable("Analysis.py", base=base)]
packages = ["idna"]
options = {
'build_exe': {
'packages': packages,
},
}
setup(
name="Tweeter Sentiment Analysis",
options=options,
version="1.0",
description='Tweeter tweets analysis',
executables=executables
)
| 1.421875 | 1 |
aerokit/aero/riemann.py | PierreMignerot/aerokit | 0 | 12794506 | <reponame>PierreMignerot/aerokit
# backward compatibility
from aerokit.instance.riemann import * | 0.683594 | 1 |
auxiliary/simulation_study.py | marclipfert/student-project-antonia-marc | 0 | 12794507 | <reponame>marclipfert/student-project-antonia-marc<filename>auxiliary/simulation_study.py
#*************************** SMALL SIMULATION STUDY *****************************
#************* TIME-VARYING TREATMENT: MOVE UP CONCEPTIONS IN TIME **************
import numpy as np
import pandas as pd
import math as math
import statsmodels.formula.api as smf
import scipy.stats as ss
def simulating_results_from_different_bandwidths(running, dummy, treatment, N):
# create empty arrays to store results inside
params = np.zeros((5,N), np.float)
bse = np.zeros((5,N), np.float)
in_ci = np.zeros((5,N), np.float)
# quantile for 95% confidence interval
q = ss.norm.ppf(0.975)
for n in range(N):
y = 20 + 0.05*running + treatment + np.random.normal(0, 0.5, len(running))
df = pd.DataFrame(data = {'y': y, 'z': running, 'D': dummy})
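        # Fit the same RDD specification on successively narrower windows around the
        # cutoff; the print function labels them 10 years, 5 years, 12, 9 and 3 months.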
reg1 = smf.ols(formula = 'y ~ z + D + D*z', data = df).fit(cov_type='HC1')
reg2 = smf.ols(formula = 'y ~ z + D + D*z', data = df.loc[(df['z']>-31) & (df['z']<30)]).fit(cov_type='HC1')
reg3 = smf.ols(formula = 'y ~ z + D + D*z', data = df.loc[(df['z']>-13) & (df['z']<12)]).fit(cov_type='HC1')
reg4 = smf.ols(formula = 'y ~ z + D + D*z', data = df.loc[(df['z']>-10) & (df['z']<9)]).fit(cov_type='HC1')
reg5 = smf.ols(formula = 'y ~ D', data = df.loc[(df['z']>-4) & (df['z']<3)]).fit(cov_type='HC1')
reg_list = [reg1, reg2, reg3, reg4, reg5]
ci_lower = [0, 0, 0, 0, 0]
ci_upper = [0, 0, 0, 0, 0]
for i in range(5):
params[i,n] = reg_list[i].params['D']
bse[i,n] = reg_list[i].bse['D']
ci_lower[i] = reg_list[i].params['D'] - q*reg_list[i].bse['D']
ci_upper[i] = reg_list[i].params['D'] + q*reg_list[i].bse['D']
if ci_lower[i] <= 0 <= ci_upper[i]:
in_ci[i, n] = 1
else:
in_ci[i, n] = 0
return params, bse, in_ci
# Print results
def print_simulation_results(params, bse, in_ci, N):
print('Simulation Study - Results')
print('\u2014'*100)
# header
print('{:<22s}{:>14s}{:>14s}{:>14s}{:>14s}{:>14s}'
.format("", "RDD (1)", "RDD (2)", "RDD (3)", "RDD (4)", "RDD (5)"))
print('{:<22s}{:>14s}{:>14s}{:>14s}{:>14s}{:>14s}'
.format("", "10 years", "5 years", "12 months", "9 months", "3 months"))
print('\u2014'*100)
# Average coefficient
print('{:<25s}'.format("Estimated Coef. of D"), end="")
# coefficient estimate
for i in range(len(params)):
print ('{:>10.4f}'.format(params[i,:].mean()), end=" ")
print(" "*116)
# Average coefficient
print('{:<25s}'.format("Standard Error"), end="")
# coefficient estimate
for i in range(len(params)):
print ('{:>10.4f}'.format(bse[i,:].mean()), end=" ")
print(" "*116)
# Average coefficient
print('{:<25s}'.format("0 in 0.95-Conf. Int."), end="")
# coefficient estimate
for i in range(len(params)):
print ('{:>10.4f}'.format(sum(in_ci[i,:])/N ), end=" ")
print(" "*116)
print('\u2014'*100)
print('The first row contains the average of the estimated coefficient of D. The second row contains the')
print('average of the corresponding standard error. The last row shows the relative frequency of the event')
print('that 0 (the overall effect) was included in the 95%-confidence interval.')
# increase timespan
def increase_available_timespan(t_max):
z_ = np.linspace(-90, t_max, num = (t_max + 91), dtype = int)
D_ = np.where(z_ < 0, 0, 1)
sin_ = np.zeros(len(z_))
for i in z_:
sin_[i] = math.sin(0.5*z_[i])
T_ = np.where( (z_ < 0) | (z_ > 4*math.pi), 0, sin_)
N_ = 1000
params_ = np.zeros(N_, np.float)
bse_ = np.zeros(N_, np.float)
in_ci_ = np.zeros(N_, np.float)
q_ = ss.norm.ppf(0.975)
for n in range(N_):
y_ = 20 + 0.05*z_ + T_ + np.random.normal(0, 0.5, len(z_))
df_ = pd.DataFrame(data = {'y': y_, 'z': z_, 'D': D_})
reg = smf.ols(formula = 'y_ ~ z_ + D_ + D_*z_', data = df_).fit(cov_type='HC1')
ci_lower = []
ci_upper = []
params_[n] = reg.params['D_']
bse_[n] = reg.bse['D_']
ci_lower = reg.params['D_'] - q_*reg.bse['D_']
ci_upper = reg.params['D_'] + q_*reg.bse['D_']
if ci_lower <= 0 <= ci_upper:
in_ci_[n] = 1
else:
in_ci_[n] = 0
return params_ | 2.546875 | 3 |
setup.py | iterativo-git/pycybersource | 0 | 12794508 | #!/usr/bin/env python
"""
A light wrapper for Cybersource SOAP Toolkit API
"""
import os
import sys
from setuptools import setup, find_packages
import pycybersource
# fix permissions for sdist
if 'sdist' in sys.argv:
os.system('chmod -R a+rX .')
os.umask(int('022', 8))
base_dir = os.path.dirname(__file__)
with open(os.path.join(base_dir, 'README.md'), 'rb') as fp:
long_description = fp.read().decode('utf-8')
setup(
name='pycybersource',
version='0.1.2a0',
description='A light wrapper for Cybersource SOAP Toolkit API',
author='<NAME>',
author_email='<EMAIL>',
url='',
platforms=['Platform Independent'],
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['pycybersource'],
keywords='cybersource payment soap zeep api wrapper',
requires=['zeep'],
install_requires=['zeep'],
test_suite='pycybersource.tests',
)
| 1.398438 | 1 |
dailyfresh/apps/goods/views.py | litaiji/dailyfresh | 0 | 12794509 | <reponame>litaiji/dailyfresh<filename>dailyfresh/apps/goods/views.py
from django.shortcuts import render
# Create your views here.
from django.views.generic import View
from django.core.cache import cache
from django_redis import get_redis_connection
from goods.models import GoodsType
from goods.models import IndexGoodsBanner
from goods.models import IndexPromotionBanner
from goods.models import IndexTypeGoodsBanner
class IndexView(View):
    '''Home page'''
def get(self, request):
        '''Render the home page'''
        # Try to get the page data from the cache first
context = cache.get('index_page_data')
if context is None:
            # Cache miss
            # Get the goods category info
types = GoodsType.objects.all()
            # Get the home page carousel goods info
goods_banners = IndexGoodsBanner.objects.all().order_by('index')
            # Get the home page promotion banner info
promotin_banners = IndexPromotionBanner.objects.all().order_by('index')
            # Get the per-category goods display info for the home page
for type in types:
                # Image banners for this category on the home page
image_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1).order_by('index')
                # Text banners for this category on the home page
title_banners = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0).order_by('index')
                # Dynamically attach the image and text banner lists to the category object
type.image_banners = image_banners
type.title_banners = title_banners
context = {
'types': types,
'goods_banners': goods_banners,
'promotin_banners': promotin_banners,
}
            # Store the assembled context in the cache
            # key, value, timeout (seconds)
cache.set('index_page_data', context, 3600)
        # Get the number of items in the user's shopping cart
user = request.user
cart_count = 0
if user.is_authenticated():
conn = get_redis_connection('default')
cart_key = 'cart_%d'%user.id
cart_count = conn.hlen(cart_key)
else:
cart_count = 0
        # Assemble the template context
context.update(cart_count=cart_count)
return render(request, 'index.html', context)
def post(self, request):
pass
| 2.140625 | 2 |
CreateCommunities.py | EUDAT-Training/B2FIND-Training | 7 | 12794510 | #!/usr/bin/env python
import sys, os, optparse, time
from os.path import expanduser
PY2 = sys.version_info[0] == 2
if PY2:
from urllib import quote
from urllib2 import urlopen, Request
from urllib2 import HTTPError,URLError
else:
from urllib import parse
from urllib.request import urlopen, Request
from urllib.error import HTTPError,URLError
from output import Output
import json
import pprint
import random, string
p = optparse.OptionParser(
description = '''Description
===========
Management of B2FIND communities within EUDAT-B2FIND, comprising
- Creating communities, i.e. CKAN groups
- .....
''',
formatter = optparse.TitledHelpFormatter(),
    prog = 'CreateCommunities.py',
version = "%prog " + 'v0.1',
usage = "%prog [options] COMMUNITY"
)
p.add_option('-v', '--verbose', action="count", help="increase output verbosity (e.g., -vv is more than -v)", default=False)
p.add_option('--iphost', '-i', help="IP adress of B2FIND portal (CKAN instance)", metavar='IP')
p.add_option('--auth', help="Authentification for CKAN API (API key, by default taken from file $HOME/.netrc)",metavar='STRING')
p.add_option('--jobdir', help='\ndirectory where log, error and html-result files are stored. By default directory is created as startday/starthour/processid .', default=None)
p.add_option('--mode', '-m', metavar='PROCESSINGMODE', help='\nSupported modes are (c)reate, (u)pdate, (patch), (d)elete, (p)urge and (s)how . default is creation of a group', default='c')
options,arguments = p.parse_args()
pstat=dict()
now = time.strftime("%Y-%m-%d %H:%M:%S")
jid = os.getpid()
OUT = Output(pstat,now,jid,options)
logger = OUT.setup_custom_logger('root',options.verbose)
community=sys.argv[1]
conffile='mapfiles/%s.json' % community
with open(conffile, 'r') as f:
group_dict = json.load(f)
# checking given options:
if (not options.iphost):
logger.critical('The option iphost is mandatory !')
sys.exit()
if (not options.auth):
home = os.path.expanduser("~")
if (not os.path.isfile(home+'/.netrc')):
logger.critical('Can not access job host authentification file %s/.netrc ' % home )
sys.exit()
else:
f = open(home+'/.netrc','r')
lines=f.read().splitlines()
f.close()
l = 0
for host in lines:
if(options.iphost == host.split()[0]):
options.auth = host.split()[1]
break
if (not options.auth):
        logger.critical('API key is neither given by option --auth nor can be retrieved from %s/.netrc' % home )
sys.exit()
print('aaauth %s' % options.auth)
if options.mode == 'c' :
action='group_create'
##elif options.mode == 'u' :
## action='group_update'
## group_dict['id']=group_dict['name']
elif options.mode == 'patch' :
action='group_patch'
group_dict['id']=group_dict['name']
elif options.mode == 'd' :
action='group_delete'
elif options.mode == 'p' :
action='group_purge'
group_dict['id']=group_dict['name']
elif options.mode == 's' :
action='group_show'
group_dict['id']=group_dict['name']
else :
logger.critical('Mode %s not supported' % options.mode)
sys.exit(-1)
##HEW-T print('group_dict %s' % group_dict)
if (True):
##for group_dict in groupsdict.itervalues() :
##HEW-T print('group_dict:\t%s\n' % (group_dict))
# Use the json module to dump the dictionary to a string for posting.
### data_string = urllib.parse.quote(json.dumps(dataset_dict))
encoding='utf-8'
if PY2 :
data_string = quote(json.dumps(group_dict))##.encode("utf-8") ## HEW-D 160810 , encoding="latin-1" ))##HEW-D .decode(encoding)
else :
data_string = parse.quote(json.dumps(group_dict)).encode(encoding) ## HEW-D 160810 , encoding="latin-1" ))##HEW-D .decode(encoding)
    # The action that should be executed.
    apiaction='http://%s/api/action/%s' % (options.iphost,action)
    print('API action executed : %s' % apiaction)
request = Request(apiaction,data_string)
# Creating a group requires an authorization header.
request.add_header('Authorization', options.auth)
# Make the HTTP request.
###Py2 response = urllib.request.urlopen(request, data_string)
try:
response = urlopen(request)
assert response.code == 200
except HTTPError as e:
        logger.critical('%s : Can not execute the HTTP request' % e)
sys.exit(-1)
# Use the json module to load CKAN's response into a dictionary.
## print('Response %s' % response.read().decode('utf-8'))
response_dict = response.read().decode('utf-8')
##HEW-T print('Response %s' % response_dict)
response_dict = json.loads(response_dict)
## assert response_dict["success"] is True
# package_create returns the created package as its result.
created_package = response_dict['result']
print('Response:')
pprint.pprint(created_package)
| 2.15625 | 2 |
Python 3 Programming/ex_5_8.py | ElizaLo/Practice-Python | 5 | 12794511 | <reponame>ElizaLo/Practice-Python<filename>Python 3 Programming/ex_5_8.py
import turtle
import math
wn = turtle.Screen()
wn.bgcolor("SkyBlue")
bob = turtle.Turtle()
bob.right(90)
for _ in range(4):
bob.forward(50)
bob.left(90)
bob.right(225)
distance = math.sqrt(50*50 / 2)
bob.forward(distance)
bob.right(90)
bob.forward(distance)
wn.exitonclick()
| 3.921875 | 4 |
bin/ipynb2rst.py | albapa/QUIP | 229 | 12794512 | #!/usr/bin/env python3
import sys
import os
import glob
if len(sys.argv[1:]) == 0:
dirs = [os.getcwd()]
else:
dirs = sys.argv[1:]
for dir in dirs:
for notebook in glob.glob(os.path.join(dir, '*.ipynb')):
cmd = 'ipython nbconvert --to rst {0}'.format(notebook)
print(cmd)
os.system(cmd)
| 2.453125 | 2 |
DBP/models/user.py | Pusnow/DB-Project | 0 | 12794513 | #-*- coding: utf-8 -*-
from DBP.models import Base,session
from sqlalchemy import Column, Integer, Unicode, Enum, Date, String
from sqlalchemy import Table, ForeignKey, PrimaryKeyConstraint
from sqlalchemy.sql.expression import label
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from datetime import datetime
import random
from werkzeug.security import generate_password_hash, check_password_hash
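# Enrolment status codes mapped to their Korean display strings
# ("pending approval", "approved", "refused").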
enrollcode = {
"Waiting" : u"승인 대기중",
"Approved" : u"승인 완료",
"Refused" : u"승인 거절",
}
class Enroll(Base):
__tablename__ = 'Enroll'
__table_args__ = (PrimaryKeyConstraint('taskprefix','userid',name='enroll_pk'),)
taskprefix = Column('taskprefix', Unicode(100), ForeignKey('Task.prefix'), nullable = False)
userid = Column('userid', Integer, ForeignKey('User.id'), nullable = False)
status = Column('status',Enum(u"Waiting",u"Approved",u"Refused"), nullable = False , server_default = "Waiting")
user = relationship("User", backref="enrolls")
class User(Base):
__tablename__ = 'User'
id = Column(Integer, primary_key=True, autoincrement = True, nullable = False)
loginid = Column(Unicode(100), unique = True, nullable = False)
password = Column(String(100), nullable = False)
name = Column(Unicode(100), nullable = False)
gender = Column(Enum(u"남자", u"여자"), nullable = False, server_default = u"남자")
address = Column(Unicode(255))
role = Column(Enum(u"관리자", u"제출자", u"평가자"), nullable = False, server_default = u"제출자")
score = Column(Integer, server_default = "0", nullable = False)
birth = Column(Date)
cellphone = Column(Unicode(15))
def __init__(self,loginid,name,password):
self.loginid = loginid
self.name = name
self.password = generate_password_hash(password)
def checkPassword(self,password):
return check_password_hash(self.password, password)
def dict(self):
tslist = []
for x in self.enrolls:
x.task.setTables()
ts = x.task.dict()
ts["parsednum"] = x.task.getParsedNumBySubmitter(self)
ts["tasknum"] = x.task.getTaskNumBySubmitter(self)
tslist.append(ts)
data = {"id" : self.id,
"loginid" : self.loginid,
"name" : self.name,
"gender" : self.gender,
"address" : self.address,
"role" : self.role,
"score" : self.score,
"birthstring" : self.birth,
"cellphone" : self.cellphone,
"tasks" : tslist
}
if data["birthstring"] :
data["birthstring"] = data["birthstring"].isoformat()
return data
def enrollStatus(self):
enrolls = list()
for enroll in self.enrolls:
task = enroll.task.dict()
if task["status"] == "Stop":
task["status"] = u"수집 종료"
else :
task["status"] = enrollcode[enroll.status]
enrolls.append(task)
return enrolls
def editInfo(self, name, password, gender, address, birth, cellphone):
if password.strip() != "":
self.password = generate_password_hash(password)
self.name = name
self.gender = gender
self.address = address
self.birth = datetime.strptime(birth, "%a %b %d %Y").date()
self.cellphone = cellphone
session.commit()
def setScore(self):
sums = list()
for en in self.enrolls:
en.task.setTables()
ps = session.query(en.task.parsed).filter(en.task.parsed.submitterid == self.id).filter(en.task.parsed.status == u"Evaluated").all()
for p in ps:
sums.append(p.score)
self.score = sum(sums)/len(sums)
def getSubmitInfo(self):
info = self.dict()
submitinfo = dict(parsed = 0, taskdata = 0)
for en in self.enrolls:
en.task.setTables()
submitinfo["parsed"] += session.query(en.task.parsed).filter(en.task.parsed.submitterid == self.id).count()
ps = session.query(en.task.parsed).filter(en.task.parsed.submitterid == self.id).filter(en.task.parsed.status == u"Evaluated").all()
for p in ps:
submitinfo["taskdata"] += len(p.tasks)
info["submitinfo"] = submitinfo
return info
def setStatus(self,status):
self.status = status
session.commit()
@staticmethod
def randomEvaluator():
maxnum = session.query(User).filter(User.role == u"평가자").count()
if maxnum == 0:
return session.query(User).filter(User.role == u"관리자").first()
return session.query(User).filter(User.role == u"평가자")[random.randrange(0,maxnum)]
@staticmethod
def getUser(id):
return session.query(User).get(id)
@staticmethod
def getUsers():
return session.query(User).order_by(User.id).all()
@staticmethod
def newUser(loginid, password, name, gender, address , role, birth,cellphone):
user = User(loginid, name, password)
user.gender = gender
user.address = address
user.role = role
user.birth = datetime.strptime(birth, "%a %b %d %Y").date()
user.cellphone = cellphone
session.add(user)
session.commit()
@staticmethod
def login(loginid, password):
user = session.query(User).filter(User.loginid == loginid).first()
if user and user.checkPassword(password) :
return user
else :
return None
@staticmethod
def deleteUser(user):
for en in user.enrolls:
en.task.setTables()
ps = session.query(en.task.parsed).filter(en.task.parsed.submitterid == user.id).all()
for p in ps:
for t in p.tasks:
session.delete(t)
session.delete(p)
for e in user.enrolls :
session.delete(e)
session.delete(user)
session.commit()
| 2.4375 | 2 |
doors-detector/doors_detector/evaluators/model_evaluator.py | micheleantonazzi/master-thesis-robust-door-detector | 0 | 12794514 | from abc import abstractmethod
from typing import List, Dict
from src.bounding_box import BoundingBox
from src.utils.enumerators import BBType, BBFormat
import torch.nn.functional as F
class ModelEvaluator:
def __init__(self):
self._gt_bboxes = []
self._predicted_bboxes = []
self._img_count = 0
def get_gt_bboxes(self) -> List[BoundingBox]:
"""
Returns a list containing the ground truth bounding boxes
:return:
"""
return self._gt_bboxes
def get_predicted_bboxes(self) -> List[BoundingBox]:
"""
Returns a list containing the predicted bounding boxes
:return:
"""
return self._predicted_bboxes
def add_predictions(self, targets, predictions):
img_count_temp = self._img_count
for target in targets:
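            # Ground-truth boxes appear to be in (center-x, center-y, width, height);
            # they are converted to top-left XYWH for the BoundingBox helper below.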
for label, [x, y, w, h] in zip(target['labels'].tolist(), target['boxes'].tolist()):
self._gt_bboxes.append(BoundingBox(
image_name=str(self._img_count),
class_id=str(label),
coordinates=(x - w / 2, y - h / 2, w, h),
bb_type=BBType.GROUND_TRUTH,
format=BBFormat.XYWH,
))
self._img_count += 1
pred_logits, pred_boxes_images = predictions['pred_logits'], predictions['pred_boxes']
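        # Per-class probabilities; the last class is excluded when taking the best
        # label (it appears to act as the "no object" class, as in DETR-style heads).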
prob = F.softmax(pred_logits, -1)
scores_images, labels_images = prob[..., :-1].max(-1)
for scores, labels, pred_boxes in zip(scores_images, labels_images, pred_boxes_images):
for score, label, [x, y, w, h] in zip(scores, labels, pred_boxes):
label = label.item()
score = score.item()
if label >= 0:
self._predicted_bboxes.append(
BoundingBox(
image_name=str(img_count_temp),
class_id=str(label),
coordinates=(x - w / 2, y - h / 2, w, h),
bb_type=BBType.DETECTED,
format=BBFormat.XYWH,
confidence=score
)
)
img_count_temp += 1
@abstractmethod
def get_metrics(self) -> Dict:
pass
| 2.359375 | 2 |
publishstatic/management/commands/storage/s3.py | scuml/publishstatic | 1 | 12794515 | <filename>publishstatic/management/commands/storage/s3.py
import boto
from ..common import get_required_env_variable
class S3Storage(object):
def __init__(self, bucket=None):
"""
Establish connections.
Assume credentials are in environment or
in a config file.
"""
self.s3_conn = boto.connect_s3()
# Have a default bucket, so individual function calls don't
# need to query global settings and pass the bucket each time.
# self.bucket will only be used in common utility functions, such
# an download_file and upload_file, nothing "dangerous" like
# "empty_bucket" or "delete_bucket."
if bucket is not None:
self.bucket = bucket
get_required_env_variable('AWS_ACCESS_KEY_ID')
get_required_env_variable('AWS_SECRET_ACCESS_KEY')
def upload_contents(self, content, key, headers):
headers['x-amz-acl'] = 'public-read'
key = self.bucket.new_key(key)
key.set_contents_from_file(content, headers)
return key
def close(self):
"""
Close connection.
"""
self.s3_conn.close()
| 2.75 | 3 |
pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/errors.py | wangyum/anaconda | 0 | 12794516 | """
Numba-specific errors and warnings.
"""
from __future__ import print_function, division, absolute_import
import contextlib
from collections import defaultdict
import warnings
# Filled at the end
__all__ = []
class NumbaWarning(Warning):
"""
Base category for all Numba compiler warnings.
"""
class PerformanceWarning(NumbaWarning):
"""
Warning category for when an operation might not be
as fast as expected.
"""
class WarningsFixer(object):
"""
An object "fixing" warnings of a given category caught during
certain phases. The warnings can have their filename and lineno fixed,
and they are deduplicated as well.
"""
def __init__(self, category):
self._category = category
# {(filename, lineno, category) -> messages}
self._warnings = defaultdict(set)
@contextlib.contextmanager
def catch_warnings(self, filename=None, lineno=None):
"""
Store warnings and optionally fix their filename and lineno.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always', self._category)
yield
for w in wlist:
msg = str(w.message)
if issubclass(w.category, self._category):
# Store warnings of this category for deduplication
filename = filename or w.filename
lineno = lineno or w.lineno
self._warnings[filename, lineno, w.category].add(msg)
else:
# Simply emit other warnings again
warnings.warn_explicit(msg, w.category,
w.filename, w.lineno)
def flush(self):
"""
Emit all stored warnings.
"""
for (filename, lineno, category), messages in sorted(self._warnings.items()):
for msg in sorted(messages):
warnings.warn_explicit(msg, category, filename, lineno)
self._warnings.clear()
class NumbaError(Exception):
pass
class IRError(NumbaError):
"""
An error occurred during Numba IR generation.
"""
class RedefinedError(IRError):
pass
class NotDefinedError(IRError):
def __init__(self, name, loc=None):
self.name = name
self.loc = loc
def __str__(self):
loc = "?" if self.loc is None else self.loc
return "{name!r} is not defined in {loc}".format(name=self.name,
loc=self.loc)
class VerificationError(IRError):
pass
class MacroError(NumbaError):
"""
An error occurred during macro expansion.
"""
class DeprecationError(NumbaError):
pass
class LoweringError(NumbaError):
"""
An error occurred during lowering.
"""
def __init__(self, msg, loc):
self.msg = msg
self.loc = loc
super(LoweringError, self).__init__("%s\n%s" % (msg, loc.strformat()))
class ForbiddenConstruct(LoweringError):
"""
A forbidden Python construct was encountered (e.g. use of locals()).
"""
class TypingError(NumbaError):
"""
A type inference failure.
"""
def __init__(self, msg, loc=None):
self.msg = msg
self.loc = loc
if loc:
super(TypingError, self).__init__("%s\n%s" % (msg, loc.strformat()))
else:
super(TypingError, self).__init__("%s" % (msg,))
class UntypedAttributeError(TypingError):
def __init__(self, value, attr, loc=None):
msg = 'Unknown attribute "{attr}" of type {type}'.format(type=value,
attr=attr)
super(UntypedAttributeError, self).__init__(msg, loc=loc)
class ByteCodeSupportError(NumbaError):
"""
Failure to extract the bytecode of the user's function.
"""
class CompilerError(NumbaError):
"""
Some high-level error in the compiler.
"""
class ConstantInferenceError(NumbaError):
"""
Failure during constant inference.
"""
__all__ += [name for (name, value) in globals().items()
if not name.startswith('_') and isinstance(value, type)
and issubclass(value, (Exception, Warning))]
| 2.484375 | 2 |
pyroscope/__init__.py | pyroscope-io/pyroscope-python | 9 | 12794517 | from collections import namedtuple
from contextlib import contextmanager
from pyroscope import agent
Config = namedtuple('Config', ('app_name', 'server_address',
'auth_token', 'sample_rate', 'with_subprocesses', 'log_level'))
class PyroscopeError(Exception):
pass
def configure(app_name, server_address, auth_token="", sample_rate=100, with_subprocesses=0, log_level="debug", tags=None):
agent.start(app_name, server_address, auth_token, sample_rate, int(with_subprocesses), log_level)
if tags is not None:
tag(tags)
def stop():
agent.stop()
def change_name(name):
agent.change_name(name)
@contextmanager
def tag_wrapper(tags):
for key, value in tags.items():
agent.set_tag(key, value)
try:
yield
finally:
for key in tags.keys():
agent.set_tag(key, "")
def tag(tags):
for key, value in tags.items():
agent.set_tag(key, value)
def remove_tags(*keys):
for key in keys:
agent.set_tag(key, "")
def build_summary():
return agent.build_summary()
def test_logger():
agent.test_logger()
| 2.515625 | 3 |
06. Python Essentials/04. For Loops/08. Number sequence.py | tdrv90/softuni-courses | 0 | 12794518 | <filename>06. Python Essentials/04. For Loops/08. Number sequence.py<gh_stars>0
numbers = []
count_of_nums = int(input())
for i in range(count_of_nums):
n = int(input())
numbers.append(n)
print(f'Max number: {max(numbers)}')
print(f'Min number: {min(numbers)}')
| 3.953125 | 4 |
p019.py | piohhmy/euler | 0 | 12794519 | <reponame>piohhmy/euler
"""
1 Jan 1900 was a Monday.
Thirty days has September, April, June and November.
All the rest have thirty-one,
Saving February alone, Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
"""
def days_in(year, month):
if month == 4 or month == 6 or month == 9 or month == 11:
return 30
elif month == 2:
if year % 4 == 0:
return 29
else:
return 28
else:
return 31
def is_sunday(days_post_jan_1_1901):
# 1st sunday in 1901 is Jan 6
return days_post_jan_1_1901 % 7 == 6
def sunday_as_first_of_month_from_1901_2000():
total_days = 1
sundays = 0
for year in range(1901, 2001):
for month in range(1, 13):
total_days += days_in(year, month)
if is_sunday(total_days):
sundays +=1
return sundays
| 3.828125 | 4 |
cogs/Exchange.py | 34-Matt/Trickster | 0 | 12794520 | import discord
from discord.ext import commands
from forex_python.converter import CurrencyRates,CurrencyCodes
from datetime import date
class Exchange(commands.Cog):
def __init__(self,bot):
self.bot = bot
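        # Accepted spellings for each ISO currency code; user input is lowercased
        # before being matched against these lists in getAddressName().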
self.exchangeNames = {
"EUR":["eur","euro member countries"],
"IDR":["idr","indonesia rupiah"],
"BGN":["bgn","bulgaria lev"],
"ILS":["ils","israel shekel"],
"GBP":["gbp","united kingdom pound"],
"DKK":["dkk","denmark krone"],
"CAD":["cad","canada dollar"],
"JPY":["jpy","japan yen"],
"HUF":["huf","hungary forint"],
"RON":["ron","Romania New Leu"],
"MYR":["myr","malaysia ringgit"],
"SEK":["sek","sweden krona"],
"SGD":["sgd","singapore dollar"],
"HKD":["hkd","hong kong dollar"],
"AUD":["aud","australia dollar"],
"CHF":["chf","switzerland franc"],
"KRW":["krw","korea won","korea south won"],
"CNY":["cny","china yun renminbi"],
"TRY":["try","turkey lira"],
"HRK":["hrk","croatia kuna"],
"NZD":["nzd","new zealand dollar"],
"THB":["thb","thailand baht"],
"USD":["usd","united states dollar"],
"NOK":["nok","norway krone"],
"RUB":["rub","russia ruble"],
"INR":["inr","india ruppe"],
"MXN":["mxn","mexico peso"],
"CZK":["czh","czech republic koruna"],
"BRL":["brl","brazil real"],
"PLN":["pln","poland zloty"],
"PHP":["php","philippines peso"],
"ZAR":["zar","south africa rand"]
}
self.CurrencyRates = CurrencyRates()
self.CurrencyCodes = CurrencyCodes()
@commands.command()
async def ExchangeRate(self,ctx):
        ''' Gets the exchange rate between two currencies.
$ExchangeRate USD to JPY => The exchange rate from USD to JPY is xxx.xx
'''
letters = ctx.message.content.split(maxsplit=1)[1]
letters = letters.lower()
letters = letters.split("to")
fromAddress = letters[0].strip()
toAddress = letters[1].strip()
fromID = self.getAddressName(fromAddress)
toID = self.getAddressName(toAddress)
fromCode = self.CurrencyCodes.get_symbol(fromID)
toCode = self.CurrencyCodes.get_symbol(toID)
if fromID == -1:
await ctx.send("Was unable to find currency for {}".format(fromAddress))
elif toID == -1:
await ctx.send("Was unable to find currency for {}".format(toAddress))
else:
rate = self.CurrencyRates.get_rate(fromID,toID)
await ctx.send("The exchange rate from {}1 is {}{:.2f}".format(fromCode,toCode,rate))
def getAddressName(self,address):
'''Gets the proper address name for desired currency
address is the name of the desired currency
returns the id of the desired currency or -1 if none are valid
'''
for id,addArray in self.exchangeNames.items():
if address in addArray:
return id
return -1
def setup(bot):
bot.add_cog(Exchange(bot))
| 2.734375 | 3 |
test/test_quadrature.py | mmechelke/bayesian_xfel | 0 | 12794521 | <reponame>mmechelke/bayesian_xfel
import unittest
from bxfel.orientation.quadrature import GaussSO3Quadrature, ChebyshevSO3Quadrature
import numpy as np
class TestGauss(unittest.TestCase):
def test_init(self):
g = GaussSO3Quadrature(1)
self.assertEqual(g.m, 4)
self.assertTrue(g._R is not None)
self.assertTrue(g._w is not None)
self.assertEqual(g._w[0], 0.25)
self.assertTrue(np.array_equal(g._R[0].ravel(),
np.eye(3).ravel()))
def test_Rotation(self):
i = np.random.randint(1,10)
g = GaussSO3Quadrature(i)
for R in g._R:
self.assertAlmostEqual(np.linalg.det(R), 1.)
class TestGauss(unittest.TestCase):
def test_init(self):
g = GaussSO3Quadrature(1)
self.assertEqual(g.m, 4)
self.assertTrue(g._R is not None)
self.assertTrue(g._w is not None)
self.assertEqual(g._w[0], 0.25)
self.assertTrue(np.array_equal(g._R[0].ravel(),
np.eye(3).ravel()))
def test_Rotation(self):
i = np.random.randint(1,10)
g = GaussSO3Quadrature(i)
for R in g._R:
self.assertAlmostEqual(np.linalg.det(R), 1.)
if __name__ == "__main__":
unittest.main()
| 2.46875 | 2 |
utils/nms_wrapper.py | songheony/MOTDT | 0 | 12794522 | <reponame>songheony/MOTDT<filename>utils/nms_wrapper.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
from torchvision.ops import nms
def nms_detections(pred_boxes, scores, nms_thresh):
pred_boxes = torch.FloatTensor(pred_boxes)
scores = torch.FloatTensor(scores)
keep = nms(pred_boxes, scores, nms_thresh)
return keep
| 1.742188 | 2 |
metrics/metrics_interface.py | DominikSpiljak/imdb-review-classifier | 0 | 12794523 | <gh_stars>0
class Metric:
def initialize(self):
pass
def log_batch(self, predicted, ground_truth):
pass
def compute(self):
pass
| 1.554688 | 2 |
venv/Lib/site-packages/timingsutil/stopwatch.py | avim2809/CameraSiteBlocker | 0 | 12794524 | # encoding: utf-8
import logging_helper
from .timeout import TimersBase
logging = logging_helper.setup_logging()
class Stopwatch(TimersBase):
def __init__(self,
high_precision=None):
super(Stopwatch, self).__init__(high_precision=high_precision)
self.reset()
def reset(self):
self.__stop_time = None
self.__laps = []
self.__start_time = self._now
def stop(self):
if self.__stop_time is None:
self.__stop_time = self._now
return self.glance
else:
            return self.glance
def lap(self):
lap_end_time = self._now
lap_start_time = (self.__start_time
if not self.__laps
else self.__laps[-1][u'lap_end_time'])
self.__laps.append({
u'lap_start_time': lap_start_time,
u'lap_end_time': lap_end_time,
u'lap_time': lap_end_time - lap_start_time
})
return self.__laps[-1][u'lap_time']
@property
def lap_times(self):
return self.__laps
@property
def glance(self):
if self.__stop_time:
return self.__stop_time - self.__start_time
else:
return self._now - self.__start_time
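if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes TimersBase from .timeout
    # supplies the `_now` clock used above).
    import time
    sw = Stopwatch()
    time.sleep(0.1)
    print("lap 1: {}".format(sw.lap()))    # time since reset()
    time.sleep(0.1)
    print("lap 2: {}".format(sw.lap()))    # time since the previous lap
    print("total: {}".format(sw.stop()))   # total elapsed time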
| 3.015625 | 3 |
pycap/ping.py | Blueswing/pycap | 0 | 12794525 | import select
import socket
import struct
import time
import uuid
from collections import deque
from .icmp import parse_icmp_packet
from .ip import get_ip_address, parse_ipv4_packet
_FMT_ICMP_PACKET = '>BBHHH'
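# Big-endian ICMP echo header layout: type (B), code (B), checksum (H),
# identifier (H) and sequence number (H).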
def chesksum(data):
n = len(data)
m = n % 2
sum_ = 0
for i in range(0, n - m, 2):
        # Add the data two bytes at a time: first byte in the low-order position, second byte in the high-order position
sum_ += (data[i]) + ((data[i + 1]) << 8)
if m:
sum_ += (data[-1])
    # Fold the bits above 16 into the low 16 bits
sum_ = (sum_ >> 16) + (sum_ & 0xffff)
    # If there is still a carry above 16 bits, fold it in again
sum_ += (sum_ >> 16)
answer = ~sum_ & 0xffff
    # Convert from host byte order to network byte order (little-endian to big-endian)
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
def dealtime(dst_addr, sumtime, shorttime, longtime, accept, i, time):
sumtime += time
print(sumtime)
if i == 4:
print("{0}的Ping统计信息:".format(dst_addr))
msg = "数据包:已发送={0},接收={1},丢失={2}({3}%丢失),\n往返行程的估计时间(以毫秒为单位):\n\t最短={4}ms,最长={5}ms,平均={6}ms"
print(msg.format(i + 1, accept, i + 1 - accept, (i + 1 - accept) / (i + 1) * 100, shorttime, longtime, sumtime))
class TimedData:
def __init__(self, data, ts):
self.data = data
self.ts = ts
class MovingStatistic:
def __init__(self, duration):
self._duration = duration
self._q = deque()
def update(self, data):
now = time.time()
self._q.append(TimedData(data, now))
while len(self._q) > 0 and now - self._q[0].ts > self._duration:
self._q.popleft()
class PingStat(MovingStatistic):
def total(self):
return len(self._q)
# def success(self):
# return sum(err is None for _, err in self._q)
def failure(self):
return sum(item.data[1] is not None for item in self._q)
def failure_ratio(self):
total = self.total()
if total == 0:
return 0.0
return self.failure() / total
def time_avg(self):
cnt = 0
sum_t = 0.0
for item in self._q:
t, err = item.data
if err is None:
cnt += 1
sum_t += t
if cnt == 0:
return 0.0
return sum_t / cnt
def _get_random_payload(length):
if length == 0:
return b''
n = (length // 16) + 1
if n == 1:
return uuid.uuid4().bytes[:length]
else:
return (uuid.uuid4().bytes * n)[:length]
def ping(addr: str, interval=3.0, timeout=3.0):
stat = PingStat(60.0)
rawsocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
data_sequence = 1
while True:
time_elapsed, err = _ping_once(rawsocket, addr, data_sequence, _get_random_payload(64), timeout)
data_sequence = (data_sequence + 1) % 0xffff
stat.update((time_elapsed, err))
total = stat.total()
fail = stat.failure()
print('total:', total, ', failed:', fail, ', average time:', stat.time_avg())
time.sleep(interval)
def _ping_once(rawsocket, addr, data_sequence, payload, timeout):
try:
dst_addr = str(get_ip_address(addr))
except Exception as e:
return timeout, f'failed to resolve domain, {e}'
data_type = 8
data_code = 0
data_id = 0
icmp_packet = build_icmp_packet(data_type, data_code, data_id, data_sequence, payload)
t0 = time.time()
rawsocket.sendto(icmp_packet, (dst_addr, 0))
while True:
time_elapsed = time.time() - t0
if time_elapsed >= timeout:
return timeout, 'timeout'
rlist, _, _ = select.select([rawsocket], [], [], timeout - time_elapsed)
if len(rlist) == 0:
return timeout, 'timeout'
data, _ = rawsocket.recvfrom(1500)
time_elapsed = time.time() - t0
header, ip_payload = parse_ipv4_packet(data)
if header.protocol == 'icmp':
icmp_header, res_payload = parse_icmp_packet(ip_payload)
if payload == res_payload:
return time_elapsed, None
else:
continue
else:
continue
def build_icmp_packet(data_type, data_code, data_id, data_seq, payload):
l_payload = len(payload)
if l_payload == 0:
icmp_packet = struct.pack(_FMT_ICMP_PACKET, data_type, data_code, 0, data_id, data_seq)
icmp_chesksum = chesksum(icmp_packet)
icmp_packet = struct.pack(_FMT_ICMP_PACKET, data_type, data_code, icmp_chesksum, data_id, data_seq)
else:
fmt = _FMT_ICMP_PACKET + f'{l_payload}s'
icmp_packet = struct.pack(fmt, data_type, data_code, 0, data_id, data_seq, payload)
icmp_chesksum = chesksum(icmp_packet)
icmp_packet = struct.pack(fmt, data_type, data_code, icmp_chesksum, data_id, data_seq, payload)
return icmp_packet
def play_packet():
# print(socket.getaddrinfo(socket.gethostname(), None, family=socket.AddressFamily.AF_INET))
sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0800))
sock.bind(('wlp3s0', socket.htons(0x0800)))
header = struct.pack('>6s6s2s', b'\xaa\xaa\xaa\xaa\xaa\xaa', b'\xbb\xbb\xbb\xbb\xbb\xbb', b'\x08\x00')
packet = header + b'hello, world!'
sock.send(packet)
print(packet)
# print(res)
sock.close()
def arp_boardcast():
sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0800))
sock.bind(('wlp3s0', socket.htons(0x0800)))
ether_type = b'\x08\x06'
header = struct.pack('>6s6s2s', b'\xff\xff\xff\xff\xff\xff', b'\xbb\xbb\xbb\xbb\xbb\xbb', ether_type)
| 2.5625 | 3 |
runtime/data/script/__make_require_graph.py | CrueLu/elonafoobar | 1 | 12794526 | from glob import glob
import re
require_pattern = re.compile(r'\brequire\("(.*)"\)')
print("digraph require_graph {")
for path in glob("**/*.lua", recursive=True):
with open(path) as f:
caller = path.replace(".lua", "").replace("/", ".")
caller_node = caller.replace(".", "__")
print(f" {caller_node} [label=\"{caller}\"];")
for line in f.readlines():
match = require_pattern.search(line)
if match:
calee = match.group(1)
calee_node = calee.replace(".", "__")
print(f" {caller_node} -> {calee_node};")
print("}")
| 2.78125 | 3 |
Flopy_Tutorial1_Cheq.py | akurnizk/diked_hr_estuary_gw | 0 | 12794527 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 15:47:36 2018
@author: akurnizk
"""
import flopy
import numpy as np
import sys,os
import matplotlib.pyplot as plt
# Location of BitBucket folder containing cgw folder
cgw_code_dir = r'E:\python'
sys.path.insert(0,cgw_code_dir)
from cgw.utils import general_utils as genu
from cgw.utils import feature_utils as shpu
from cgw.utils import raster_utils as rastu
# Assign name and create modflow model object
modelname = 'CheqModel1'
work_dir = r'E:\Herring'
mf = flopy.modflow.Modflow(modelname, exe_name='mf2005',model_ws=work_dir)
swt = flopy.seawat.Seawat(modelname, exe_name='swtv4')
print(swt.namefile)
mean_sea_level = 0.843 # in meters at closest NOAA station
#%%
# Example of making a MODFLOW-like grid from a shapefile
data_dir = r'E:\ArcGIS'
shp_fname = os.path.join(data_dir,'Chequesset_Model_Area_UTM.shp')
cell_spacing = 10. # model grid cell spacing in meters
# Define inputs for shp_to_grid function
shp_to_grid_dict = {'shp':shp_fname,'cell_spacing':cell_spacing}
grid_outputs = shpu.shp_to_grid(**shp_to_grid_dict)
# Pop out all of the outputs into individual variables
[X_nodes,Y_nodes],model_polygon,[out_proj,[xshift,yshift],min_angle] = grid_outputs
grid_transform = [out_proj,[xshift,yshift],min_angle] # make transform list
# Can calculate cell centers (where heads are calculated), in different coordinates
cc,cc_proj,cc_ll = shpu.nodes_to_cc([X_nodes,Y_nodes],grid_transform)
# Use model_polygon to define active cells in the model
ir,ic,_ = shpu.gridpts_in_shp(model_polygon,cc)
active_cells = genu.define_mask(cc,[ir,ic])
"""
Plot active cells
"""
#fig,ax = genu.plt.subplots(1,2)
#genu.quick_plot(active_cells.astype(int),ax=ax[0]) # in row, column space
#ax[0].set_xlabel('column #')
#ax[0].set_ylabel('row #')
#c1=ax[1].pcolormesh(cc[0],cc[1],active_cells.astype(int)) # in model coordinates
#genu.plt.colorbar(c1,ax=ax[1],orientation='horizontal')
#ax[1].set_xlabel('X [m]')
#ax[1].set_ylabel('Y [m]')
#%% Example of loading DEM data for that area
dem_fname = os.path.join(data_dir,'Cheq10mx10m_UTM.tif')
# Experimental part \/
dem_X,dem_Y,dem_da = rastu.load_geotif(dem_fname) # da is an xarray data array
dem_vals = dem_da.values.squeeze()
#dem_X, dem_Y, dem_vals = rastu.read_griddata(dem_fname)
# Know that dem is way higher resolution...can decimate it to save time
decimate_by_ncells = 1 # by every n cells
#dem_X = dem_X[::decimate_by_ncells,::decimate_by_ncells]
#dem_Y = dem_Y[::decimate_by_ncells,::decimate_by_ncells]
#dem_vals = dem_vals[::decimate_by_ncells,::decimate_by_ncells]
# Set no-data value to nan
dem_vals[dem_vals==dem_da.nodatavals[0]] = genu.np.nan
# Transform dem to model coordinates with linear interpolation
trans_dict = {'orig_xy':[dem_X,dem_Y],'orig_val':dem_vals,'active_method':'linear',
'new_xy':cc_proj} # if dem in same projection as model boundary shp
dem_trans = rastu.subsection_griddata(**trans_dict)
dem_trans[dem_trans<-1000] = genu.np.nan
genu.quick_plot(dem_trans)
#%% DEM model inputs
Lx = np.amax(dem_X)-np.amin(dem_X)
Ly = np.amax(dem_Y)-np.amin(dem_Y)
zbot = -100 # if bottom of model is horizontal, approx. bedrock (check Masterson)
nlay = 1 # 1 layer model
nrow, ncol = cc[0].shape # to use when cheq_griddev is implemented
delr = cell_spacing
delc = cell_spacing
delv = (dem_trans - zbot) / nlay
botm = zbot
# Tutorial 1 model domain and grid definition
#Lx = 1000.
#Ly = 1000.
#ztop = 0.
#zbot = -50.
#nlay = 1
#nrow = 10
#ncol = 10
#delr = Lx/ncol
#delc = Ly/nrow
#delv = (ztop - zbot) / nlay
#botm = np.linspace(ztop, zbot, nlay + 1)
#%%
"""
Time Stepping
"""
# Time step parameters
total_length = 10 # days
dt = 6 # stress period time step, hrs
perlen_days = dt/24. # stress period time step, days
nper = int(total_length/perlen_days) # the number of stress periods in the simulation
nstp_default = dt/0.5 # stress period time step divided by step time length (to better interpolate tidal changes, set to 0.5 hrs)
perlen = [perlen_days]*nper # length of a stress period; each item in the matrix is the amount
# of elapsed time since the previous point (need to change the first)
perlen[0] = 1 # set first step as steady state
steady = [False]*nper
steady[0] = True # first step steady state
nstp = [nstp_default]*nper # number of time steps in a stress period
nstp[0] = 1
#Tutorial 2 default time step parameters
#nper = 3
#perlen = [1, 100, 100]
#nstp = [1, 100, 100]
#steady = [True, False, False]
#%% # Create the discretization (DIS) object
dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
top=dem_trans, botm=botm)
# Tutorial 1 DIS object
#dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
#top=dem_vals, botm=botm[1:])
# Tutorial 2 DIS object when transient conditions are implemented
# dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc,
# top=ztop, botm=botm[1:],
# nper=nper, perlen=perlen, nstp=nstp, steady=steady)
#%% # Variables for the BAS (basic) package
# Added 5/28/19
"""
Active cells and the like are defined with the Basic package (BAS), which is required for every MOD-FLOW model.
It contains the ibound array, which is used to specify which cells are active (value is positive), inactive (value is 0),
or fixed head (value is negative). The numpy package (aliased as np) can be used to quickly initialize the ibound array
with values of 1, and then set the ibound value for the first and last columns to −1. The numpy package (and Python, in general)
uses zero-based indexing and supports negative indexing so that row 1 and column 1, and row 1 and column 201, can be
referenced as [0,0], and [0,−1], respectively. Although this simulation is for steady flow, starting heads still need
to be specified. They are used as the head for fixed-head cells (where ibound is negative), and as a starting point to compute
the saturated thickness for cases of unconfined flow.
ibound = np.ones((1, 201))
ibound[0, 0] = ibound[0, -1] = -1
"""
ibound = np.ones((nlay, nrow, ncol), dtype=np.int32)
ibound[:,~active_cells] = 0 # far offshore cells are inactive
ibound[0,dem_trans<mean_sea_level] = -1 # fixed head for everything less than msl
ibound[:,np.isnan(dem_trans)] = 0 # nan cells are inactive
genu.quick_plot(ibound) # plots boundary condition: 1 is above mean sea level (msl), 0 is msl, -1 is under msl.
strt = np.ones((nlay, nrow, ncol), dtype=np.float32)
active_dem_heights = dem_trans[active_cells & ~np.isnan(dem_trans)]
strt[0, active_cells & ~np.isnan(dem_trans)] = active_dem_heights # start with freshwater at surface elevation
strt[0, dem_trans<mean_sea_level] = mean_sea_level # start with water at sea level
genu.quick_plot(strt) # plots starting condition
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
#%% # added 3/8/19 - creates matrix where hydraulic conductivities (hk = horiz, vk = vert) can be implemented
hk1 = np.ones((nlay,nrow,ncol), np.float)
hk1[:,:,:]=10. # everything set to 10 - use data? calculate?
vka1 = np.ones((nlay,nrow,ncol), np.float)
vka1[:,:,:]=10. # everything set to 10.
# Add LPF package to the MODFLOW model
lpf = flopy.modflow.ModflowLpf(mf, hk=hk1, vka=vka1, ipakcb=53)
#%%
"""
Transient General-Head Boundary Package
First, we will create the GHB object, which is of the following type:
flopy.modflow.ModflowGhb.
The key to creating Flopy transient boundary packages is recognizing that the
boundary data is stored in a dictionary with key values equal to the
zero-based stress period number and values equal to the boundary conditions
for that stress period. For a GHB the values can be a two-dimensional nested
list of [layer, row, column, stage, conductance]:
Datums for 8447435, Chatham, Lydia Cove MA
https://tidesandcurrents.noaa.gov/datums.html?units=1&epoch=0&id=8447435&name=Chatham%2C+Lydia+Cove&state=MA
"""
# Make list for stress period 1
# Using Mean Sea Level (MSL) in meters at closest NOAA station for stages
#stageleft = mean_sea_level
#stageright = mean_sea_level
#bound_sp1 = []
#for il in range(nlay):
# # Figure out looping through hk1 array to get hk values at each cell for changing conductance.
# condleft = hk1[0,0,0] * (stageleft - zbot) * delc
# condright = hk1[0,0,0] * (stageright - zbot) * delc
# for ir in range(nrow):
# bound_sp1.append([il, ir, 0, stageleft, condleft])
# bound_sp1.append([il, ir, ncol - 1, stageright, condright])
## Only 1 stress period for steady-state model
#print('Adding ', len(bound_sp1), 'GHBs for stress period 1.')
#
#stress_period_data = {0: bound_sp1}
#ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=stress_period_data)
# using single conductance value (see drain for modification based on Masterson, 2004)
conductance = 1000. # (modify 1000 to actual conductance)
bound_sp1 = []
stress_period_data = {0: bound_sp1}
ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=stress_period_data)
#%% # Add drain condition
#Darcy's law states that
#Q = -KA(h1 - h0)/(X1 - X0)
#Where Q is the flow (L3/T)
#K is the hydraulic conductivity (L/T)
#A is the area perpendicular to flow (L2)
#h is head (L)
#X is the position at which head is measured (L)
#Conductance combines the K, A and X terms so that Darcy's law can be expressed as
#Q = -C(h1 - h0)
#where C is the conductance (L2/T)
# https://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?drn.htm
# from Masterson, 2004
# C = KWL/M where
#C is hydraulic conductance of the seabed (ft2/d);
#K is vertical hydraulic conductivity of seabed deposits
#(ft/d);
#W is width of the model cell containing the seabed (ft);
#L is length of the model cell containing the seabed (ft);
#and
#M is thickness of seabed deposits (ft).
#The vertical hydraulic conductivity (K) of the seabed
#deposits in most of the study area was assumed to be 1 ft/d,
#which is consistent with model simulations of similar coastal
#discharge areas in other areas on Cape Cod (Masterson and
#others, 1998). In the area occupied by Salt Pond and Nauset
#Marsh, it was assumed that there were thick deposits of low-permeability
#material (<NAME>, U.S. Geological Survey,
#oral commun., 2002) and the vertical hydraulic conductivity
#was set to 0.1 ft/d. The thickness of the seabed deposits was
#assumed to be half the thickness of the model cell containing the
#boundary.
# still using simple conductance
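# Reference sketch (not used below): a cell-by-cell drain conductance after
# Masterson (2004), C = K*W*L/M, assuming a seabed K of 1 ft/d (~0.3048 m/d) and
# M equal to half the cell thickness; only dem_trans, zbot, delr and delc come
# from this script, the other names are illustrative.
k_seabed = 0.3048  # vertical hydraulic conductivity of seabed deposits [m/d], assumed
seabed_thickness = 0.5 * (dem_trans - zbot)  # M: half the model cell thickness [m]
masterson_conductance = k_seabed * delr * delc / seabed_thickness  # C = K*W*L/M [m2/d]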
land_cells = active_cells & ~np.isnan(dem_trans) & (dem_trans>mean_sea_level)
landrows, landcols = land_cells.nonzero()
lrcec = {0:np.column_stack([np.zeros_like(landrows),landrows,landcols,dem_trans[land_cells],conductance*np.ones_like(landrows)])} # this drain will be applied to all stress periods
drn = flopy.modflow.ModflowDrn(mf, stress_period_data=lrcec)
#%% # Add recharge condition
# steady state, units in [m/day]?
rch = flopy.modflow.ModflowRch(mf, nrchop=3, rech=1.4e-3) # from https://pubs.usgs.gov/wsp/2447/report.pdf
#%% # Add OC package to the MODFLOW model
spd = {(0, 0): ['print head', 'print budget', 'save head', 'save budget']}
oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, compact=True)
#%% # Add PCG package to the MODFLOW model
pcg = flopy.modflow.ModflowPcg(mf)
#%% # Write the MODFLOW model input files
mf.write_input()
#%% # Run the MODFLOW model
success, buff = mf.run_model()
#%%
"""
Post-Processing the Results
Now that we have successfully built and run our MODFLOW model, we can look at the results.
MODFLOW writes the simulated heads to a binary data output file.
We cannot look at these heads with a text editor, but flopy has a binary utility that can be used to read the heads.
The following statements will read the binary head file and create a plot of simulated heads for layer 1:
"""
import flopy.utils.binaryfile as bf
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.subplot(1,1,1,aspect='equal')
hds = bf.HeadFile(os.path.join(work_dir,modelname+'.hds'))
head = hds.get_data(totim=hds.get_times()[-1])
head[head<-100] = np.nan
#levels = np.arange(1,10,1)
extent = (delr/2., Lx - delr/2., Ly - delc/2., delc/2.)
# headplot = plt.contour(head[0, :, :], levels=levels, extent=extent) #
headplot = plt.contour(head[0, :, :], extent=extent)
plt.xlabel('Lx')
plt.ylabel('Ly')
plt.colorbar(headplot) # plots heads as contours
#plt.colorbar.set_label('heads')
plt.savefig('CheqModel1a.png')
genu.quick_plot(head) # plots heads with color gradient
genu.quick_plot(dem_trans) # plots elevations
#%%
"""
Flopy also has some pre-canned plotting capabilities that can be accessed using the ModelMap class.
The following code shows how to use the modelmap class to plot boundary conditions (IBOUND),
plot the grid, plot head contours, and plot vectors:
"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
hds = bf.HeadFile(modelname+'.hds')
times = hds.get_times()
head = hds.get_data(totim=times[-1])
levels = np.linspace(0, 10, 11)
cbb = bf.CellBudgetFile(modelname+'.cbc')
kstpkper_list = cbb.get_kstpkper()
frf = cbb.get_data(text='FLOW RIGHT FACE', totim=times[-1])[0]
fff = cbb.get_data(text='FLOW FRONT FACE', totim=times[-1])[0]
#%%
"""
The pre-canned plotting doesn't seem to be able to allow averaging to reduce nrow and ncol
on the plot, making it difficult to plot a large grid. The commented section below uses the
modelmap class from Tutorial 1, followed by use of the plotting from the Henry Problem.
"""
#modelmap = flopy.plot.ModelMap(model=mf, layer=0)
#qm = modelmap.plot_ibound()
#lc = modelmap.plot_grid() # Need to fix grid to have fewer rows and columns
#cs = modelmap.contour_array(head, levels=levels)
#quiver = modelmap.plot_discharge(frf, fff, head=head)
#plt.savefig('CheqModel1b.png')
"""
# Load data (when implementing SEAWAT)
ucnobj = bf.UcnFile('MT3D001.UCN', model=swt)
times = ucnobj.get_times()
concentration = ucnobj.get_data(totim=times[-1])
"""
# Average flows to cell centers
qx_avg = np.empty(frf.shape, dtype=frf.dtype)
qx_avg[:, :, 1:] = 0.5 * (frf[:, :, 0:ncol-1] + frf[:, :, 1:ncol])
qx_avg[:, :, 0] = 0.5 * frf[:, :, 0]
qy_avg = np.empty(fff.shape, dtype=fff.dtype)
qy_avg[1:, :, :] = 0.5 * (fff[0:nlay-1, :, :] + fff[1:nlay, :, :])
qy_avg[0, :, :] = 0.5 * fff[0, :, :]
# Make the plot
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
#ax.imshow(concentration[:, 0, :], interpolation='nearest',
# extent=(0, Lx, 0, Ly))
y, x, z = dis.get_node_coordinates()
X, Y = np.meshgrid(x, y)
iskip = 3
ax.quiver(X[::iskip, ::iskip], Y[::iskip, ::iskip],
qx_avg[::iskip, 0, ::iskip], -qy_avg[::iskip, 0, ::iskip],
color='k', scale=5, headwidth=3, headlength=2,
headaxislength=2, width=0.0025)
plt.savefig('CheqModel1b.png')
plt.show()
#%%
"""
Post-Processing the Results
Once again, we can read heads from the MODFLOW binary output file, using the flopy.utils.binaryfile module.
Included with the HeadFile object are several methods that we will use here:
* get_times() will return a list of times contained in the binary head file
* get_data() will return a three-dimensional head array for the specified time
* get_ts() will return a time series array [ntimes, headval] for the specified cell
Using these methods, we can create head plots and hydrographs from the model results:
"""
# headfile and budget file objects already created
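# Illustrative get_ts() sketch (the cell index below is an arbitrary placeholder, not a
# value taken from this model; with the OC settings above only one saved time is plotted):
ts = hds.get_ts((0, nrow // 2, ncol // 2))  # rows of [time, head] for one (layer, row, col)
plt.figure()
plt.plot(ts[:, 0], ts[:, 1])
plt.xlabel('time [days]')
plt.ylabel('head [m]')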
# Setup contour parameters (levels already set)
extent = (delr/2., Lx - delr/2., delc/2., Ly - delc/2.)
print('Levels: ', levels)
print('Extent: ', extent)
# Make the plots
#Print statistics
print('Head statistics')
print(' min: ', head.min())
print(' max: ', head.max())
print(' std: ', head.std())
"""
Again, commented out section using modelmap
"""
## Flow right face and flow front face already extracted
##%%
##Create the plot
#f = plt.figure()
#plt.subplot(1, 1, 1, aspect='equal')
#
#
#modelmap = flopy.plot.ModelMap(model=mf, layer=0)
#qm = modelmap.plot_ibound()
##
## lc = modelmap.plot_grid()
#qm = modelmap.plot_bc('GHB', alpha=0.5)
#cs = modelmap.contour_array(head, levels=levels)
#plt.clabel(cs, inline=1, fontsize=10, fmt='%1.1f', zorder=11)
#quiver = modelmap.plot_discharge(frf, fff, head=head)
#
#mfc='black'
#plt.plot(lw=0, marker='o', markersize=8,
# markeredgewidth=0.5,
# markeredgecolor='black', markerfacecolor=mfc, zorder=9)
#plt.savefig('CheqModel2-{}.png')
"""
From <NAME>
"""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, aspect='equal')
im = ax.imshow(head[:, 0, :], interpolation='nearest',
extent=(0, Lx, 0, Ly))
ax.set_title('Simulated Heads')
| 2.3125 | 2 |
oracle/modules/reddit.py | Toofifty/Oracle2 | 1 | 12794528 | """
Oracle 2.0 IRC Bot
reddit.py plugin module
http://toofifty.me/oracle
"""
import urllib, json
from format import BOLD, RESET
def _init(bot):
print '\t%s loaded' % __name__
def parsereddit(l, b, i):
"""
!d Parse a Reddit link into it's information
!a <link>
!r user
"""
def parselink(link):
meta_data_link = link + '.json'
tables = urllib.urlopen(meta_data_link)
data = json.loads(tables.read())[0]['data']['children'][0]['data']
title = data['title']
author = data['author']
subreddit = data['subreddit']
score = data['score']
comments = data['num_comments']
mod_status = ' [M]' if data['distinguished'] == 'moderator' else ''
sticky_status = ' [sticky]' if data['stickied'] else ''
b.l_say(
'Reddit post: %s%s%s%s by %s%s%s' % (
BOLD, title, sticky_status, RESET, BOLD, author, mod_status
), i
)
b.l_say(
'Subreddit: %s | Score: %s | Comments: %s' \
% (subreddit, score, comments), i
)
if i.args > 0:
for link in i.args:
if '.reddit.com/' in i.args[0].lower():
parselink(i.args[0].lower())
else:
b.l_say('Usage: .parsereddit <link>', i, 0)
return True
def reddit(l, b, i):
"""
!d Alias for .parsereddit
!a <link>
!r user
"""
return parsereddit(l, b, i)
def _chat(bot, args):
n, c, m = args
if '.reddit.com/' in ' '.join(m):
input = bot.new_input(n, c, m)
# Treat input as a command.
input.set_command('parsereddit')
input.set_level(1)
input.set_user(bot.get_user(n))
input.args = []
# Handle multiple links
for word in m:
if '.reddit.com/' in word:
input.args.append(word)
bot.plugins.process_command(bot, input)
| 2.921875 | 3 |
scraper.py | adnanalimurtaza/tripadvisor_data_scraper | 1 | 12794529 | <reponame>adnanalimurtaza/tripadvisor_data_scraper<filename>scraper.py<gh_stars>1-10
# importing libraries
from bs4 import BeautifulSoup
import urllib
import os
import urllib.request
base_path = os.path.dirname(os.path.abspath('__file__'))
file = open(os.path.expanduser(r""+base_path+"/datasets/Hotels_Reviews.csv"), "wb")
file.write(b"name,current_price_per_night,average_rating,total_reviews_received,address,lat,lng,reviewer_nationality,rating,review_title,review"+ b"\n")
trip_advisor_url = "https://www.tripadvisor.com"
WebSites = [
trip_advisor_url+"/Hotels-g189896-Finland-Hotels.html"]
# looping through each site until it hits a break
for page_url in WebSites:
page_source = urllib.request.urlopen(page_url).read()
soup = BeautifulSoup(page_source, "lxml")
divs = soup.find_all('div', { "class" : "hotel_content easyClear sem" })
for hotel_content in divs:
# Extract name, url
listing_title = hotel_content.find('div', { "class" : "listing_title" })
name = listing_title.find('a').contents[0]
name = name.replace(",", "")
print(name)
url = listing_title.find('a')['href']
url = trip_advisor_url + url
#Extract current_price_per_night
price_div = hotel_content.find('div', { "class" : "price" })
price_span = price_div.select_one("span")
current_price_per_night = price_span.text
# Extract average_rating
average_rating_div = hotel_content.find('div', { "class" : "bubbleRating" })
average_rating = average_rating_div.select_one("span")["content"]
# Extract total_reviews_received, reviews_url
reviews_span = hotel_content.find('span', { "class" : "reviewCount" })
if (reviews_span == None):
total_reviews_received = "0"
continue
else:
total_reviews_received = reviews_span.text.split(' ')[0].replace(",","")
print(total_reviews_received)
reviews_url = reviews_span.find('a')['href']
reviews_url = trip_advisor_url + reviews_url
hotel_page_source = urllib.request.urlopen(reviews_url).read()
hotel_soup = BeautifulSoup(hotel_page_source, "lxml")
page = 1
while True:
# Extract Lat, Lng
lat = lng = ""
all_script = hotel_soup.find_all("script", {"src":False})
keys = ['lat', 'lng']
for script in all_script:
all_value = script.string
if (all_value == None):
continue
for line in all_value.splitlines():
if line.split(':')[0].strip() in keys:
if (line.split(':')[0].strip() == keys[0]):
lat = line.split(':')[1].strip()
else:
lng = line.split(':')[1].strip()
lat = lat.replace(",", "")
lng = lng.replace(",", "")
# Extract Address
address_div = hotel_soup.find('div', { "class" : "prw_rup prw_common_atf_header_bl headerBL"})
street_address = address_div.find('span', { "class" : "street-address" }).text
locality = address_div.find('span', { "class" : "locality" }).text
if (len(locality.split(' ')) > 2):
city = locality.split(' ')[0]
postal_code = locality.split(' ')[1]
else:
city = ""
postal_code = ""
country = address_div.find('span', { "class" : "country-name" }).text
address = street_address + " " + locality + " " + country
address = address.replace(",", "")
reviews = hotel_soup.find_all('div', {"class": "review-container"})
            # Loop through all reviews available on page
for review in reviews:
# Extract reviewer_name
reviewer_name_div = review.find('div', {"class": "username mo"})
if (reviewer_name_div == None):
reviewer_name = ""
else:
reviewer_name = reviewer_name_div.find("span", {"class": "expand_inline scrname"}).text
reviewer_name = reviewer_name.replace(",", " ")
# Extract reviewer_nationality
reviewer_location_div = review.find('div', {"class": "location"})
if (reviewer_location_div == None):
reviewer_nationality = ""
else:
reviewer_nationality = reviewer_location_div.find("span", {"class": "expand_inline userLocation"}).text
reviewer_nationality = reviewer_nationality.replace(",", " ")
# Extract rating_given_by_reviewer, review_date
rating_div = review.find("div", {"class": "rating reviewItemInline"})
if (rating_div == None):
rating = ""
else:
if (rating_div.find("span", {"class": "ui_bubble_rating bubble_50"}) != None):
rating = 5
elif (rating_div.find("span", {"class": "ui_bubble_rating bubble_40"}) != None):
rating = 4
elif (rating_div.find("span", {"class": "ui_bubble_rating bubble_30"}) != None):
rating = 3
elif (rating_div.find("span", {"class": "ui_bubble_rating bubble_20"}) != None):
rating = 2
elif (rating_div.find("span", {"class": "ui_bubble_rating bubble_10"}) != None):
rating = 1
else:
rating = ""
review_date_span = rating_div.find("span", {"class": "ratingDate relativeDate"})
if (review_date_span != None):
review_date = review_date_span["title"]
else:
review_date = ""
                # TODO Add day_since_fetch column after finalizing dataset
# Extract review_title,
review_title_div = review.find("div", {"class": "quote"})
if (review_title_div != None):
review_title_span = review_title_div.find("span", {"class": "noQuotes"})
if (review_title_span != None):
review_title = review_title_span.text
else:
review_title = ""
else:
review_title = ""
review_title = review_title.replace(",", " ")
                # TODO Add review_words_count column after finalizing dataset
# Extract review
review_div = review.find("div", {"class": "prw_rup prw_reviews_text_summary_hsx"})
if (review_div == None):
review = ""
else:
partial_review = review_div.find("p", {"class": "partial_entry"})
if (partial_review == None):
review = ""
else:
review = partial_review.text[:-6]
review = review.replace(",", " ")
review = review.replace("\n", " ")
# Add Record
record = str(name) + "," + str(current_price_per_night) + "," + str(average_rating) + "," + str(total_reviews_received) + "," + str(address) + "," + str(lat) + "," + str(lng) + "," + str(reviewer_nationality) + "," + str(rating) + "," +str(review_title) + "," + str(review)
file.write(bytes(record, encoding="ascii", errors='ignore') + b"\n")
# Extract pagination url
count = float(total_reviews_received)/10
if (count > 150):
count = 150
pagination_div = hotel_soup.find('div', { "class" : "unified pagination north_star "})
page_div = hotel_soup.find('div', { "class" : "pageNumbers"})
pagination_spans = page_div.find_all('span', { "class" : "pageNum taLnk " })
next_url = ""
if ((page < count) & (len(pagination_spans) > 3)):
page = page + 2
next_url = pagination_spans[3]['data-href']
next_url = trip_advisor_url + next_url
hotel_page_source = urllib.request.urlopen(next_url).read()
hotel_soup = BeautifulSoup(hotel_page_source, "lxml")
else:
break
file.close()
| 3.078125 | 3 |
tasks/__init__.py | vladcorneci/golden-gate | 262 | 12794530 | # Copyright 2017-2020 Fitbit, Inc
# SPDX-License-Identifier: Apache-2.0
"""
Invoke configuration for Golden Gate
"""
# First check that we are running in a Python >= 3.5 environment
from __future__ import print_function
import sys
if not sys.version_info.major == 3 and sys.version_info.minor >= 5:
print(
"""You are using 'invoke' in a Python 2.x environment, but Python >= 3.5 is required.
You have probably not activated the 'gg' conda environment, please check the 'Getting Started'
guide for more details on how to setup your environment""")
sys.exit(1)
# Imports
import os
import subprocess
from invoke import Collection, Config, task
from . import android
from . import apple
from . import pylon
from . import native
from . import clean
from . import wasm
from . import doc
from . import docker
# Assuming you haven't moved the default location of '.git', the .git/ folder (even for submodules)
# will be at the root of the repo. Thus, find the folder .git/ is within and assume that's the root
GIT_DIR = subprocess.check_output("git rev-parse --show-toplevel",
shell=True).strip().decode("utf-8")
ROOT_DIR = GIT_DIR
# Initialize constants that are common among all platforms/products
def initialize_constants(cfg):
cfg.C = {}
# We can't access the paths variable by using dot notation, since there is a paths() function
# on a Config object. We much use Dictionary syntax.
# http://docs.pyinvoke.org/en/0.15.0/api/config.html#module-invoke.config
cfg.C.ROOT_DIR = ROOT_DIR
cfg.C.BIN_DIR = os.path.join(cfg.C.ROOT_DIR, "bin")
cfg.C.BUILD_ROOT_DIR = os.path.join(cfg.C.ROOT_DIR, "xp/build")
cfg.C.BUILD_DIR = os.path.join(cfg.C.ROOT_DIR, "xp/build/cmake")
cfg.C.BUILD_DIR_NATIVE = os.path.join(cfg.C.BUILD_DIR, "native")
cfg.C.PLATFORM_DIR = os.path.join(cfg.C.ROOT_DIR, "platform")
cfg.C.APPS_DIR = os.path.join(cfg.C.BUILD_DIR_NATIVE, "apps")
cfg.C.APPLE_BUILD_TEMP_DIR = os.path.join(cfg.C.PLATFORM_DIR, "apple/output")
cfg.C.DOC_DIR = os.path.join(cfg.C.ROOT_DIR, "docs")
config = Config(project_location=ROOT_DIR)
initialize_constants(config)
# Add collections
ns = Collection()
ns.add_collection(android)
ns.add_collection(apple)
ns.add_collection(pylon)
ns.add_collection(native)
ns.add_collection(clean)
ns.add_collection(wasm)
ns.add_collection(doc)
ns.add_collection(docker)
# After collections are set up, set the config.
ns.configure(config)
ns.configure(android.config)
ns.configure(apple.config)
ns.configure(pylon.config)
| 1.929688 | 2 |
pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py | bdeetz/pynos | 12 | 12794531 | #!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_arp(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
self._callback = kwargs.pop('callback')
def hide_arp_holder_system_max_arp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
system_max = ET.SubElement(hide_arp_holder, "system-max")
arp = ET.SubElement(system_max, "arp")
arp.text = kwargs.pop('arp')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_arp_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address.text = kwargs.pop('arp_ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_mac_address_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
mac_address_value = ET.SubElement(arp_entry, "mac-address-value")
mac_address_value.text = kwargs.pop('mac_address_value')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacename(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacename = ET.SubElement(arp_entry, "interfacename")
interfacename.text = kwargs.pop('interfacename')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_GigabitEthernet_GigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
GigabitEthernet = ET.SubElement(interfacetype, "GigabitEthernet")
GigabitEthernet = ET.SubElement(GigabitEthernet, "GigabitEthernet")
GigabitEthernet.text = kwargs.pop('GigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_TenGigabitEthernet_TenGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
TenGigabitEthernet = ET.SubElement(interfacetype, "TenGigabitEthernet")
TenGigabitEthernet = ET.SubElement(TenGigabitEthernet, "TenGigabitEthernet")
TenGigabitEthernet.text = kwargs.pop('TenGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_FortyGigabitEthernet_FortyGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
FortyGigabitEthernet = ET.SubElement(interfacetype, "FortyGigabitEthernet")
FortyGigabitEthernet = ET.SubElement(FortyGigabitEthernet, "FortyGigabitEthernet")
FortyGigabitEthernet.text = kwargs.pop('FortyGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
HundredGigabitEthernet = ET.SubElement(interfacetype, "HundredGigabitEthernet")
HundredGigabitEthernet = ET.SubElement(HundredGigabitEthernet, "HundredGigabitEthernet")
HundredGigabitEthernet.text = kwargs.pop('HundredGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_Ve_Ve(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Ve = ET.SubElement(interfacetype, "Ve")
Ve = ET.SubElement(Ve, "Ve")
Ve.text = kwargs.pop('Ve')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
interface = ET.SubElement(input_type, "interface")
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
interface = ET.SubElement(input_type, "interface")
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_dynamic_dynamic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
dynamic = ET.SubElement(input_type, "dynamic")
dynamic = ET.SubElement(dynamic, "dynamic")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_static_static(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
static = ET.SubElement(input_type, "static")
static = ET.SubElement(static, "static")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_ip_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
ip = ET.SubElement(input_type, "ip")
ip_address = ET.SubElement(ip, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address = ET.SubElement(arp_entry, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
mac_address = ET.SubElement(arp_entry, "mac-address")
mac_address.text = kwargs.pop('mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_type = ET.SubElement(arp_entry, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_name = ET.SubElement(arp_entry, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_is_resolved(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
is_resolved = ET.SubElement(arp_entry, "is-resolved")
is_resolved.text = kwargs.pop('is_resolved')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
age = ET.SubElement(arp_entry, "age")
age.text = kwargs.pop('age')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_entry_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
entry_type = ET.SubElement(arp_entry, "entry-type")
entry_type.text = kwargs.pop('entry_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_system_max_arp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
system_max = ET.SubElement(hide_arp_holder, "system-max")
arp = ET.SubElement(system_max, "arp")
arp.text = kwargs.pop('arp')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_arp_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address.text = kwargs.pop('arp_ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_mac_address_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
mac_address_value = ET.SubElement(arp_entry, "mac-address-value")
mac_address_value.text = kwargs.pop('mac_address_value')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacename(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacename = ET.SubElement(arp_entry, "interfacename")
interfacename.text = kwargs.pop('interfacename')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Port_channel = ET.SubElement(interfacetype, "Port-channel")
Port_channel = ET.SubElement(Port_channel, "Port-channel")
Port_channel.text = kwargs.pop('Port_channel')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_GigabitEthernet_GigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
GigabitEthernet = ET.SubElement(interfacetype, "GigabitEthernet")
GigabitEthernet = ET.SubElement(GigabitEthernet, "GigabitEthernet")
GigabitEthernet.text = kwargs.pop('GigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_TenGigabitEthernet_TenGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
TenGigabitEthernet = ET.SubElement(interfacetype, "TenGigabitEthernet")
TenGigabitEthernet = ET.SubElement(TenGigabitEthernet, "TenGigabitEthernet")
TenGigabitEthernet.text = kwargs.pop('TenGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_FortyGigabitEthernet_FortyGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
FortyGigabitEthernet = ET.SubElement(interfacetype, "FortyGigabitEthernet")
FortyGigabitEthernet = ET.SubElement(FortyGigabitEthernet, "FortyGigabitEthernet")
FortyGigabitEthernet.text = kwargs.pop('FortyGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_HundredGigabitEthernet_HundredGigabitEthernet(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
HundredGigabitEthernet = ET.SubElement(interfacetype, "HundredGigabitEthernet")
HundredGigabitEthernet = ET.SubElement(HundredGigabitEthernet, "HundredGigabitEthernet")
HundredGigabitEthernet.text = kwargs.pop('HundredGigabitEthernet')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def hide_arp_holder_arp_entry_interfacetype_Ve_Ve(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
arp_ip_address_key.text = kwargs.pop('arp_ip_address')
interfacetype = ET.SubElement(arp_entry, "interfacetype")
Ve = ET.SubElement(interfacetype, "Ve")
Ve = ET.SubElement(Ve, "Ve")
Ve.text = kwargs.pop('Ve')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
interface = ET.SubElement(input_type, "interface")
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
interface = ET.SubElement(input_type, "interface")
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_dynamic_dynamic(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
dynamic = ET.SubElement(input_type, "dynamic")
dynamic = ET.SubElement(dynamic, "dynamic")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_static_static(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
static = ET.SubElement(input_type, "static")
static = ET.SubElement(static, "static")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_input_input_type_ip_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
input = ET.SubElement(get_arp, "input")
input_type = ET.SubElement(input, "input-type")
ip = ET.SubElement(input_type, "ip")
ip_address = ET.SubElement(ip, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address = ET.SubElement(arp_entry, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
mac_address = ET.SubElement(arp_entry, "mac-address")
mac_address.text = kwargs.pop('mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_type = ET.SubElement(arp_entry, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
interface_name = ET.SubElement(arp_entry, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_is_resolved(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
is_resolved = ET.SubElement(arp_entry, "is-resolved")
is_resolved.text = kwargs.pop('is_resolved')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
age = ET.SubElement(arp_entry, "age")
age.text = kwargs.pop('age')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_arp_output_arp_entry_entry_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address_key = ET.SubElement(arp_entry, "ip-address")
ip_address_key.text = kwargs.pop('ip_address')
entry_type = ET.SubElement(arp_entry, "entry-type")
entry_type.text = kwargs.pop('entry_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
| 2.546875 | 3 |
04_test_automation/nose/bisiesto.py | twiindan/api_lessons | 0 | 12794532 |
def es_bisiesto(year):
if not isinstance(year, int):
return "Error: Should be a integer"
if year < 0:
return "Error: Should be a positive number"
if year % 100 == 0:
if year % 400 == 0:
return 'Is leap year'
else:
return 'Is not leap year'
elif year % 4 == 0:
return 'Is leap year'
else:
return 'Is not leap year'
| 3.84375 | 4 |
cogs/utility.py | BRAVO68WEB/Zarena | 0 | 12794533 | import discord
from discord.ext import commands
import aiohttp
import sys
import time
import googletrans
import functools
class utility:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def avatar(self, ctx, *, member: discord.Member = None):
if member is None:
embed=discord.Embed(title="No mention!", description="Please mention a user to view his profile!", color=0xff0000)
await ctx.send(embed=embed)
else:
embed = discord.Embed(title=f"{member}'s profile picture", color=0xeee657)
embed.set_image(url=member.avatar_url)
await ctx.send(embed=embed)
@commands.command()
async def code(self, ctx, *, msg):
"""Write text in code format."""
await ctx.message.delete()
await ctx.send("```" + msg.replace("`", "") + "```")
@commands.command()
async def echo(self, ctx, *, content:str):
await ctx.send(content)
await ctx.message.delete()
@commands.command()
async def hello(self, ctx):
"""*hello
A command that will respond with a random greeting.
"""
choices = ('Hey!', 'Hello!', 'Hi!', 'Hallo!', 'Bonjour!', 'Hola!')
await ctx.send(choice(choices))
@commands.command(aliases=['platform'])
async def plat(self,ctx):
await ctx.send('Running on ' + sys.platform)
@commands.command(name='members')
async def membs(self, ctx):
server = ctx.guild
for member in server.members:
await ctx.send(member)
@commands.command(name='roles')
async def rols(self, ctx):
server = ctx.guild
for role in server.roles:
await ctx.send(role)
@commands.command(name='member')
async def mem(self, ctx):
server = ctx.guild
list = []
for member in server.members:
list.append(member.name)
embed = discord.Embed(name = 'Members', description = str(list) ,colour = discord.Colour.green())
await ctx.send(embed=embed)
@commands.command(name='role')
async def rol(self, ctx):
server = ctx.guild
list = []
for role in server.roles:
list.append(role.name)
embed = discord.Embed(name = 'Roles', description = str(list) ,colour = discord.Colour.green())
await ctx.send(embed=embed)
@commands.command(name='pingme')
async def pingme(self, ctx):
await ctx.send(ctx.author.mention)
def setup(bot):
bot.add_cog(utility(bot))
| 2.84375 | 3 |
tests/test_wrangling.py | BlaneG/CAN-income-stats | 0 | 12794534 | import pandas as pd
import numpy as np
import pytest
from ..wrangling import (
subset_plot_data_for_income_bins,
subset_plot_data_for_scatter_plot,
subset_year_age_sex_geo
)
def test_subset_plot_data_for_income_bins():
expected_result = {'Age group': {598748: '35 to 44 years',
598749: '35 to 44 years',
598750: '35 to 44 years',
598751: '35 to 44 years',
598752: '35 to 44 years',
598753: '35 to 44 years',
598754: '35 to 44 years',
598755: '35 to 44 years',
598756: '35 to 44 years',
598757: '35 to 44 years',
598758: '35 to 44 years',
598759: '35 to 44 years',
598760: '35 to 44 years'},
'GEO': {598748: 'Canada',
598749: 'Canada',
598750: 'Canada',
598751: 'Canada',
598752: 'Canada',
598753: 'Canada',
598754: 'Canada',
598755: 'Canada',
598756: 'Canada',
598757: 'Canada',
598758: 'Canada',
598759: 'Canada',
598760: 'Canada'},
'Persons with income': {598748: 'Persons with income under $5,000',
598749: 'Persons with income of $5,000 and over',
598750: 'Persons with income of $10,000 and over',
598751: 'Persons with income of $15,000 and over',
598752: 'Persons with income of $20,000 and over',
598753: 'Persons with income of $25,000 and over',
598754: 'Persons with income of $35,000 and over',
598755: 'Persons with income of $50,000 and over',
598756: 'Persons with income of $75,000 and over',
598757: 'Persons with income of $100,000 and over',
598758: 'Persons with income of $150,000 and over',
598759: 'Persons with income of $200,000 and over',
598760: 'Persons with income of $250,000 and over'},
'REF_DATE': {598748: 2017,
598749: 2017,
598750: 2017,
598751: 2017,
598752: 2017,
598753: 2017,
598754: 2017,
598755: 2017,
598756: 2017,
598757: 2017,
598758: 2017,
598759: 2017,
598760: 2017},
'SCALAR_FACTOR': {598748: 'units ',
598749: 'units ',
598750: 'units ',
598751: 'units ',
598752: 'units ',
598753: 'units ',
598754: 'units ',
598755: 'units ',
598756: 'units ',
598757: 'units ',
598758: 'units ',
598759: 'units ',
598760: 'units '},
'Sex': {598748: 'Females',
598749: 'Females',
598750: 'Females',
598751: 'Females',
598752: 'Females',
598753: 'Females',
598754: 'Females',
598755: 'Females',
598756: 'Females',
598757: 'Females',
598758: 'Females',
598759: 'Females',
598760: 'Females'},
'VALUE': {598748: 116190.0,
598749: 2214880.0,
598750: 2098920.0,
598751: 1966980.0,
598752: 1836860.0,
598753: 1699380.0,
598754: 1406370.0,
598755: 958310.0,
598756: 470300.0,
598757: 193910.0,
598758: 48780.0,
598759: 20580.0,
598760: 10390.0}}
# params
path = r"../../data/raw/11100008.csv"
df = pd.read_csv(path, low_memory=False)
age = "35 to 44 years"
year = 2017
geo = "Canada"
sex = "Females"
df = subset_year_age_sex_geo(df, year, age, sex, geo)
df = subset_plot_data_for_income_bins(df)
assert expected_result == df.to_dict()
def test_subset_plot_data_for_scatter_plot():
expected_value = {'Age group': {1550629: '25 to 34 years'},
'GEO': {1550629: 'Canada'},
'Income source': {1550629: 'Total income'},
'REF_DATE': {1550629: 2017},
'SCALAR_FACTOR': {1550629: 'units'},
'Sex': {1550629: 'Females'},
'Statistics': {1550629: 'Median income (excluding zeros)'},
'VALUE': {1550629: 34800.0}}
# load the data
path = r"../../data/raw/11100239.csv"
df = pd.read_csv(path, low_memory=False)
# parameters
year = 2017
Age = '25 to 34 years'
sex = "Females"
geo = "Canada"
cols_to_keep = ['REF_DATE',
'GEO',
'Sex',
'Age group',
'Income source',
'Statistics',
'SCALAR_FACTOR',
'VALUE',
]
df = subset_plot_data_for_scatter_plot(
df, year, Age, sex, geo,
["Total income"], ['Median income (excluding zeros)'],
cols_to_keep)
assert expected_value == df.to_dict() | 2.46875 | 2 |
blackstone/rules/citation_rules.py | goro53467/Blackstone | 541 | 12794535 | CITATION_PATTERNS = [
{
"label": "GENERIC_CASE_CITATION",
"pattern": [
{"IS_BRACKET": True, "OP": "?"},
{"SHAPE": "dddd"},
{"IS_BRACKET": True, "OP": "?"},
{"LIKE_NUM": True, "OP": "?"},
{"TEXT": {"REGEX": "^[A-Z]"}, "OP": "?"},
{"ORTH": ".", "OP": "?"},
{"TEXT": {"REGEX": r"^[A-Z\.]"}},
{"ORTH": ".", "OP": "?"},
{"LIKE_NUM": True},
],
}
]
| 1.65625 | 2 |
release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/provision/__init__.py | zaion520/ATtomato | 2 | 12794536 |
# Unix SMB/CIFS implementation.
# backend code for provisioning a Samba4 server
# Copyright (C) <NAME> <<EMAIL>> 2007-2010
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
#
# Based on the original in EJS:
# Copyright (C) <NAME> <<EMAIL>> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Functions for setting up a Samba configuration."""
__docformat__ = "restructuredText"
from base64 import b64encode
import os
import re
import pwd
import grp
import logging
import time
import uuid
import socket
import urllib
import shutil
import ldb
from samba.auth import system_session, admin_session
import samba
from samba import (
Ldb,
check_all_substituted,
in_source_tree,
source_tree_topdir,
read_and_sub_file,
setup_file,
substitute_var,
valid_netbios_name,
version,
)
from samba.dcerpc import security
from samba.dcerpc.misc import (
SEC_CHAN_BDC,
SEC_CHAN_WKSTA,
)
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2,
ENC_ALL_TYPES,
)
from samba.idmap import IDmapDB
from samba.ms_display_specifiers import read_ms_ldif
from samba.ntacls import setntacl, dsacl2fsacl
from samba.ndr import ndr_pack, ndr_unpack
from samba.provision.backend import (
ExistingBackend,
FDSBackend,
LDBBackend,
OpenLDAPBackend,
)
import samba.param
import samba.registry
from samba.schema import Schema
from samba.samdb import SamDB
VALID_NETBIOS_CHARS = " !#$%&'()-.@^_{}~"
DEFAULT_POLICY_GUID = "31B2F340-016D-11D2-945F-00C04FB984F9"
DEFAULT_DC_POLICY_GUID = "6AC1786C-016F-11D2-945F-00C04fB984F9"
DEFAULTSITE = "Default-First-Site-Name"
LAST_PROVISION_USN_ATTRIBUTE = "lastProvisionUSN"
def setup_path(file):
"""Return an absolute path to the provision tempate file specified by file"""
return os.path.join(samba.param.setup_dir(), file)
# Descriptors of naming contexts and other important objects
# "get_schema_descriptor" is located in "schema.py"
def get_sites_descriptor(domain_sid):
sddl = "D:(A;;RPLCLORC;;;AU)" \
"(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"S:AI(AU;CISA;CCDCSDDT;;;WD)" \
"(OU;CIIOSA;CR;;f0f8ffab-1191-11d0-a060-00aa006c33ed;WD)" \
"(OU;CIIOSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CIIOSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CIIOSA;WP;3e10944c-c354-11d0-aff8-0000f80367c1;b7b13124-b82e-11d0-afee-0000f80367c1;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
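# Illustrative note (not part of the original provisioning logic): each of the
# descriptor helpers in this block turns an SDDL string into a packed binary
# security descriptor for the given domain SID; the reverse direction is used
# later in this file via ndr_unpack(security.descriptor, blob).as_sddl().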
def get_config_descriptor(domain_sid):
sddl = "O:EAG:EAD:(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(A;;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;CIIO;RPWPCRCCLCLORCWOWDSDSW;;;DA)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ER)" \
"S:(AU;SA;WPWOWD;;;WD)(AU;SA;CR;;;BA)(AU;SA;CR;;;DU)" \
"(OU;SA;CR;45ec5156-db7e-47bb-b53f-dbeb2d03c40f;;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
def get_domain_descriptor(domain_sid):
sddl= "O:BAG:BAD:AI(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ER)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;IF)" \
"(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)" \
"(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)" \
"(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \
"(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)" \
"(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)" \
"(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
"(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \
"(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;;RPRC;;;RU)" \
"(A;CI;LC;;;RU)" \
"(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
"(A;;RP;;;WD)" \
"(A;;RPLCLORC;;;ED)" \
"(A;;RPLCLORC;;;AU)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"S:AI(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \
"(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
class ProvisionPaths(object):
def __init__(self):
self.shareconf = None
self.hklm = None
self.hkcu = None
self.hkcr = None
self.hku = None
self.hkpd = None
self.hkpt = None
self.samdb = None
self.idmapdb = None
self.secrets = None
self.keytab = None
self.dns_keytab = None
self.dns = None
self.winsdb = None
self.private_dir = None
class ProvisionNames(object):
def __init__(self):
self.rootdn = None
self.domaindn = None
self.configdn = None
self.schemadn = None
self.ldapmanagerdn = None
self.dnsdomain = None
self.realm = None
self.netbiosname = None
self.domain = None
self.hostname = None
self.sitename = None
self.smbconf = None
def update_provision_usn(samdb, low, high, replace=False):
"""Update the field provisionUSN in sam.ldb
This field is used to track range of USN modified by provision and
upgradeprovision.
This value is used afterward by next provision to figure out if
the field have been modified since last provision.
:param samdb: An LDB object connect to sam.ldb
:param low: The lowest USN modified by this upgrade
:param high: The highest USN modified by this upgrade
:param replace: A boolean indicating if the range should replace any
existing one or appended (default)
"""
tab = []
if not replace:
entry = samdb.search(expression="(&(dn=@PROVISION)(%s=*))" %
LAST_PROVISION_USN_ATTRIBUTE, base="",
scope=ldb.SCOPE_SUBTREE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE, "dn"])
for e in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
tab.append(str(e))
tab.append("%s-%s" % (low, high))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = ldb.MessageElement(tab,
ldb.FLAG_MOD_REPLACE, LAST_PROVISION_USN_ATTRIBUTE)
samdb.modify(delta)
def set_provision_usn(samdb, low, high):
"""Set the field provisionUSN in sam.ldb
This field is used to track range of USN modified by provision and
upgradeprovision.
This value is used afterward by next provision to figure out if
the field have been modified since last provision.
:param samdb: An LDB object connect to sam.ldb
:param low: The lowest USN modified by this upgrade
:param high: The highest USN modified by this upgrade"""
tab = []
tab.append("%s-%s" % (low, high))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = ldb.MessageElement(tab,
ldb.FLAG_MOD_ADD, LAST_PROVISION_USN_ATTRIBUTE)
samdb.add(delta)
def get_max_usn(samdb, basedn):
    """Return the biggest USN present in the provision.
    :param samdb: An LDB object pointing to the sam.ldb
    :param basedn: A string containing the base DN of the provision
        (i.e. DC=foo,DC=bar)
    :return: The biggest USN in the provision"""
res = samdb.search(expression="objectClass=*",base=basedn,
scope=ldb.SCOPE_SUBTREE,attrs=["uSNChanged"],
controls=["search_options:1:2",
"server_sort:1:1:uSNChanged",
"paged_results:1:1"])
return res[0]["uSNChanged"]
def get_last_provision_usn(sam):
"""Get the lastest USN modified by a provision or an upgradeprovision
:param sam: An LDB object pointing to the sam.ldb
:return: an integer corresponding to the highest USN modified by
(upgrade)provision, 0 is this value is unknown
"""
entry = sam.search(expression="(&(dn=@PROVISION)(%s=*))" %
LAST_PROVISION_USN_ATTRIBUTE,
base="", scope=ldb.SCOPE_SUBTREE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE])
if len(entry):
range = []
idx = 0
p = re.compile(r'-')
for r in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
tab = p.split(str(r))
range.append(tab[0])
range.append(tab[1])
idx = idx + 1
return range
else:
return None
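# Illustrative example (not part of the provisioning flow): the @PROVISION
# record stores USN ranges as "low-high" strings, so after e.g.
#     set_provision_usn(samdb, 0, 3500)
#     update_provision_usn(samdb, 3501, 4200)
# get_last_provision_usn(samdb) would return ['0', '3500', '3501', '4200'].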
class ProvisionResult(object):
def __init__(self):
self.paths = None
self.domaindn = None
self.lp = None
self.samdb = None
def check_install(lp, session_info, credentials):
"""Check whether the current install seems ok.
:param lp: Loadparm context
:param session_info: Session information
:param credentials: Credentials
"""
if lp.get("realm") == "":
raise Exception("Realm empty")
samdb = Ldb(lp.get("sam database"), session_info=session_info,
credentials=credentials, lp=lp)
if len(samdb.search("(cn=Administrator)")) != 1:
raise ProvisioningError("No administrator account found")
def findnss(nssfn, names):
"""Find a user or group from a list of possibilities.
:param nssfn: NSS Function to try (should raise KeyError if not found)
:param names: Names to check.
    :return: The value returned by nssfn for the first name that resolves.
"""
for name in names:
try:
return nssfn(name)
except KeyError:
pass
raise KeyError("Unable to find user/group in %r" % names)
findnss_uid = lambda names: findnss(pwd.getpwnam, names)[2]
findnss_gid = lambda names: findnss(grp.getgrnam, names)[2]
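# Illustrative example: findnss_uid()/findnss_gid() resolve the first candidate
# name that NSS knows about, e.g.
#     root_uid = findnss_uid(["root"])            # usually 0
#     users_gid = findnss_gid(["users", "staff"])
# and raise KeyError if none of the candidate names exist on this system.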
def setup_add_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Setup a ldb in the private dir.
:param ldb: LDB file to import data into
:param ldif_path: Path of the LDIF file to load
:param subst_vars: Optional variables to subsitute in LDIF.
:param nocontrols: Optional list of controls, can be None for no controls
"""
assert isinstance(ldif_path, str)
data = read_and_sub_file(ldif_path, subst_vars)
ldb.add_ldif(data, controls)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Modify a ldb in the private dir.
:param ldb: LDB object.
:param ldif_path: LDIF file path.
:param subst_vars: Optional dictionary with substitution variables.
"""
data = read_and_sub_file(ldif_path, subst_vars)
ldb.modify_ldif(data, controls)
def setup_ldb(ldb, ldif_path, subst_vars):
"""Import a LDIF a file into a LDB handle, optionally substituting
variables.
:note: Either all LDIF data will be added or none (using transactions).
:param ldb: LDB file to import into.
:param ldif_path: Path to the LDIF file.
:param subst_vars: Dictionary with substitution variables.
"""
assert ldb is not None
ldb.transaction_start()
try:
setup_add_ldif(ldb, ldif_path, subst_vars)
except Exception:
ldb.transaction_cancel()
raise
else:
ldb.transaction_commit()
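# Illustrative note: setup_ldb() wraps an LDIF import in a transaction; this
# file uses it e.g. in secretsdb_setup_dns() to load secrets_dns.ldif with
# REALM/DNSDOMAIN substitutions, while setup_add_ldif()/setup_modify_ldif()
# are the non-transactional building blocks.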
def provision_paths_from_lp(lp, dnsdomain):
"""Set the default paths for provisioning.
:param lp: Loadparm context.
:param dnsdomain: DNS Domain name
"""
paths = ProvisionPaths()
paths.private_dir = lp.get("private dir")
# This is stored without path prefix for the "privateKeytab" attribute in
# "secrets_dns.ldif".
paths.dns_keytab = "dns.keytab"
paths.keytab = "secrets.keytab"
paths.shareconf = os.path.join(paths.private_dir, "share.ldb")
paths.samdb = os.path.join(paths.private_dir,
lp.get("sam database") or "samdb.ldb")
paths.idmapdb = os.path.join(paths.private_dir,
lp.get("idmap database") or "idmap.ldb")
paths.secrets = os.path.join(paths.private_dir,
lp.get("secrets database") or "secrets.ldb")
paths.privilege = os.path.join(paths.private_dir, "privilege.ldb")
paths.dns = os.path.join(paths.private_dir, "dns", dnsdomain + ".zone")
paths.dns_update_list = os.path.join(paths.private_dir, "dns_update_list")
paths.spn_update_list = os.path.join(paths.private_dir, "spn_update_list")
paths.namedconf = os.path.join(paths.private_dir, "named.conf")
paths.namedconf_update = os.path.join(paths.private_dir, "named.conf.update")
paths.namedtxt = os.path.join(paths.private_dir, "named.txt")
paths.krb5conf = os.path.join(paths.private_dir, "krb5.conf")
paths.winsdb = os.path.join(paths.private_dir, "wins.ldb")
paths.s4_ldapi_path = os.path.join(paths.private_dir, "ldapi")
paths.phpldapadminconfig = os.path.join(paths.private_dir,
"phpldapadmin-config.php")
paths.hklm = "hklm.ldb"
paths.hkcr = "hkcr.ldb"
paths.hkcu = "hkcu.ldb"
paths.hku = "hku.ldb"
paths.hkpd = "hkpd.ldb"
paths.hkpt = "hkpt.ldb"
paths.sysvol = lp.get("path", "sysvol")
paths.netlogon = lp.get("path", "netlogon")
paths.smbconf = lp.configfile
return paths
def guess_names(lp=None, hostname=None, domain=None, dnsdomain=None,
serverrole=None, rootdn=None, domaindn=None, configdn=None,
schemadn=None, serverdn=None, sitename=None):
"""Guess configuration settings to use."""
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = lp.get("netbios name")
if netbiosname is None:
netbiosname = hostname
# remove forbidden chars
newnbname = ""
for x in netbiosname:
if x.isalnum() or x in VALID_NETBIOS_CHARS:
newnbname = "%s%c" % (newnbname, x)
# force the length to be <16
netbiosname = newnbname[0:15]
assert netbiosname is not None
netbiosname = netbiosname.upper()
if not valid_netbios_name(netbiosname):
raise InvalidNetbiosName(netbiosname)
if dnsdomain is None:
dnsdomain = lp.get("realm")
if dnsdomain is None or dnsdomain == "":
raise ProvisioningError("guess_names: 'realm' not specified in supplied %s!", lp.configfile)
dnsdomain = dnsdomain.lower()
if serverrole is None:
serverrole = lp.get("server role")
if serverrole is None:
raise ProvisioningError("guess_names: 'server role' not specified in supplied %s!" % lp.configfile)
serverrole = serverrole.lower()
realm = dnsdomain.upper()
if lp.get("realm") == "":
raise ProvisioningError("guess_names: 'realm =' was not specified in supplied %s. Please remove the smb.conf file and let provision generate it" % lp.configfile)
if lp.get("realm").upper() != realm:
raise ProvisioningError("guess_names: 'realm=%s' in %s must match chosen realm '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("realm").upper(), realm, lp.configfile))
if lp.get("server role").lower() != serverrole:
raise ProvisioningError("guess_names: 'server role=%s' in %s must match chosen server role '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("server role").upper(), serverrole, lp.configfile))
if serverrole == "domain controller":
if domain is None:
# This will, for better or worse, default to 'WORKGROUP'
domain = lp.get("workgroup")
domain = domain.upper()
if lp.get("workgroup").upper() != domain:
raise ProvisioningError("guess_names: Workgroup '%s' in smb.conf must match chosen domain '%s'! Please remove the %s file and let provision generate it" % (lp.get("workgroup").upper(), domain, lp.configfile))
if domaindn is None:
domaindn = "DC=" + dnsdomain.replace(".", ",DC=")
if domain == netbiosname:
raise ProvisioningError("guess_names: Domain '%s' must not be equal to short host name '%s'!" % (domain, netbiosname))
else:
domain = netbiosname
if domaindn is None:
domaindn = "DC=" + netbiosname
if not valid_netbios_name(domain):
raise InvalidNetbiosName(domain)
if hostname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to hostname '%s'!" % (realm, hostname))
if netbiosname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to netbios hostname '%s'!" % (realm, netbiosname))
if domain == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to short domain name '%s'!" % (realm, domain))
if rootdn is None:
rootdn = domaindn
if configdn is None:
configdn = "CN=Configuration," + rootdn
if schemadn is None:
schemadn = "CN=Schema," + configdn
if sitename is None:
sitename=DEFAULTSITE
names = ProvisionNames()
names.rootdn = rootdn
names.domaindn = domaindn
names.configdn = configdn
names.schemadn = schemadn
names.ldapmanagerdn = "CN=Manager," + rootdn
names.dnsdomain = dnsdomain
names.domain = domain
names.realm = realm
names.netbiosname = netbiosname
names.hostname = hostname
names.sitename = sitename
names.serverdn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (
netbiosname, sitename, configdn)
return names
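# Illustrative example (values are assumptions, not defaults): with an smb.conf
# containing "realm = SAMDOM.EXAMPLE.COM" and "server role = domain controller"
# on a host named "dc1",
#     names = guess_names(lp=lp, serverrole="domain controller")
# yields names.dnsdomain == "samdom.example.com", names.realm ==
# "SAMDOM.EXAMPLE.COM", names.netbiosname == "DC1" and
# names.domaindn == "DC=samdom,DC=example,DC=com".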
def make_smbconf(smbconf, hostname, domain, realm, serverrole,
targetdir, sid_generator="internal", eadb=False, lp=None):
"""Create a new smb.conf file based on a couple of basic settings.
"""
assert smbconf is not None
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = hostname.upper()
# remove forbidden chars
newnbname = ""
for x in netbiosname:
if x.isalnum() or x in VALID_NETBIOS_CHARS:
newnbname = "%s%c" % (newnbname, x)
#force the length to be <16
netbiosname = newnbname[0:15]
else:
netbiosname = hostname.upper()
if serverrole is None:
serverrole = "standalone"
assert serverrole in ("domain controller", "member server", "standalone")
if serverrole == "domain controller":
smbconfsuffix = "dc"
elif serverrole == "member server":
smbconfsuffix = "member"
elif serverrole == "standalone":
smbconfsuffix = "standalone"
if sid_generator is None:
sid_generator = "internal"
assert domain is not None
domain = domain.upper()
assert realm is not None
realm = realm.upper()
if lp is None:
lp = samba.param.LoadParm()
        # Load non-existent file
if os.path.exists(smbconf):
lp.load(smbconf)
if eadb and not lp.get("posix:eadb"):
if targetdir is not None:
privdir = os.path.join(targetdir, "private")
else:
privdir = lp.get("private dir")
lp.set("posix:eadb", os.path.abspath(os.path.join(privdir, "eadb.tdb")))
if targetdir is not None:
privatedir_line = "private dir = " + os.path.abspath(os.path.join(targetdir, "private"))
lockdir_line = "lock dir = " + os.path.abspath(targetdir)
lp.set("lock dir", os.path.abspath(targetdir))
else:
privatedir_line = ""
lockdir_line = ""
if sid_generator == "internal":
sid_generator_line = ""
else:
sid_generator_line = "sid generator = " + sid_generator
sysvol = os.path.join(lp.get("lock dir"), "sysvol")
netlogon = os.path.join(sysvol, realm.lower(), "scripts")
setup_file(setup_path("provision.smb.conf.%s" % smbconfsuffix),
smbconf, {
"NETBIOS_NAME": netbiosname,
"DOMAIN": domain,
"REALM": realm,
"SERVERROLE": serverrole,
"NETLOGONPATH": netlogon,
"SYSVOLPATH": sysvol,
"SIDGENERATOR_LINE": sid_generator_line,
"PRIVATEDIR_LINE": privatedir_line,
"LOCKDIR_LINE": lockdir_line
})
# reload the smb.conf
lp.load(smbconf)
# and dump it without any values that are the default
# this ensures that any smb.conf parameters that were set
# on the provision/join command line are set in the resulting smb.conf
f = open(smbconf, mode='w')
lp.dump(f, False)
f.close()
def setup_name_mappings(samdb, idmap, sid, domaindn, root_uid, nobody_uid,
users_gid, wheel_gid):
"""setup reasonable name mappings for sam names to unix names.
:param samdb: SamDB object.
:param idmap: IDmap db object.
:param sid: The domain sid.
:param domaindn: The domain DN.
:param root_uid: uid of the UNIX root user.
:param nobody_uid: uid of the UNIX nobody user.
:param users_gid: gid of the UNIX users group.
:param wheel_gid: gid of the UNIX wheel group.
"""
idmap.setup_name_mapping("S-1-5-7", idmap.TYPE_UID, nobody_uid)
idmap.setup_name_mapping("S-1-5-32-544", idmap.TYPE_GID, wheel_gid)
idmap.setup_name_mapping(sid + "-500", idmap.TYPE_UID, root_uid)
idmap.setup_name_mapping(sid + "-513", idmap.TYPE_GID, users_gid)
def setup_samdb_partitions(samdb_path, logger, lp, session_info,
provision_backend, names, schema, serverrole,
erase=False):
"""Setup the partitions for the SAM database.
Alternatively, provision() may call this, and then populate the database.
:note: This will wipe the Sam Database!
:note: This function always removes the local SAM LDB file. The erase
parameter controls whether to erase the existing data, which
may not be stored locally but in LDAP.
"""
assert session_info is not None
# We use options=["modules:"] to stop the modules loading - we
# just want to wipe and re-initialise the database, not start it up
try:
os.unlink(samdb_path)
except OSError:
pass
samdb = Ldb(url=samdb_path, session_info=session_info,
lp=lp, options=["modules:"])
ldap_backend_line = "# No LDAP backend"
if provision_backend.type is not "ldb":
ldap_backend_line = "ldapBackend: %s" % provision_backend.ldap_uri
samdb.transaction_start()
try:
logger.info("Setting up sam.ldb partitions and settings")
setup_add_ldif(samdb, setup_path("provision_partitions.ldif"), {
"SCHEMADN": ldb.Dn(schema.ldb, names.schemadn).get_casefold(),
"CONFIGDN": ldb.Dn(schema.ldb, names.configdn).get_casefold(),
"DOMAINDN": ldb.Dn(schema.ldb, names.domaindn).get_casefold(),
"LDAP_BACKEND_LINE": ldap_backend_line,
})
setup_add_ldif(samdb, setup_path("provision_init.ldif"), {
"BACKEND_TYPE": provision_backend.type,
"SERVER_ROLE": serverrole
})
logger.info("Setting up sam.ldb rootDSE")
setup_samdb_rootdse(samdb, names)
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
def secretsdb_self_join(secretsdb, domain,
netbiosname, machinepass, domainsid=None,
realm=None, dnsdomain=None,
keytab_path=None,
key_version_number=1,
secure_channel_type=SEC_CHAN_WKSTA):
"""Add domain join-specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
:param machinepass: Machine password
"""
attrs = ["whenChanged",
"secret",
"priorSecret",
"priorChanged",
"krb5Keytab",
"privateKeytab"]
if realm is not None:
if dnsdomain is None:
dnsdomain = realm.lower()
dnsname = '%s.%s' % (netbiosname.lower(), dnsdomain.lower())
else:
dnsname = None
shortname = netbiosname.lower()
# We don't need to set msg["flatname"] here, because rdn_name will handle
# it, and it causes problems for modifies anyway
msg = ldb.Message(ldb.Dn(secretsdb, "flatname=%s,cn=Primary Domains" % domain))
msg["secureChannelType"] = [str(secure_channel_type)]
msg["objectClass"] = ["top", "primaryDomain"]
if dnsname is not None:
msg["objectClass"] = ["top", "primaryDomain", "kerberosSecret"]
msg["realm"] = [realm]
msg["saltPrincipal"] = ["host/%s@%s" % (dnsname, realm.upper())]
msg["msDS-KeyVersionNumber"] = [str(key_version_number)]
msg["privateKeytab"] = ["secrets.keytab"]
msg["secret"] = [machinepass]
msg["samAccountName"] = ["%s$" % netbiosname]
msg["secureChannelType"] = [str(secure_channel_type)]
if domainsid is not None:
msg["objectSid"] = [ndr_pack(domainsid)]
# This complex expression tries to ensure that we don't have more
# than one record for this SID, realm or netbios domain at a time,
# but we don't delete the old record that we are about to modify,
# because that would delete the keytab and previous password.
res = secretsdb.search(base="cn=Primary Domains", attrs=attrs,
expression=("(&(|(flatname=%s)(realm=%s)(objectSid=%s))(objectclass=primaryDomain)(!(dn=%s)))" % (domain, realm, str(domainsid), str(msg.dn))),
scope=ldb.SCOPE_ONELEVEL)
for del_msg in res:
secretsdb.delete(del_msg.dn)
res = secretsdb.search(base=msg.dn, attrs=attrs, scope=ldb.SCOPE_BASE)
if len(res) == 1:
msg["priorSecret"] = [res[0]["secret"][0]]
msg["priorWhenChanged"] = [res[0]["whenChanged"][0]]
try:
msg["privateKeytab"] = [res[0]["privateKeytab"][0]]
except KeyError:
pass
try:
msg["krb5Keytab"] = [res[0]["krb5Keytab"][0]]
except KeyError:
pass
for el in msg:
if el != 'dn':
msg[el].set_flags(ldb.FLAG_MOD_REPLACE)
secretsdb.modify(msg)
secretsdb.rename(res[0].dn, msg.dn)
else:
spn = [ 'HOST/%s' % shortname ]
if secure_channel_type == SEC_CHAN_BDC and dnsname is not None:
            # we are a domain controller, so add servicePrincipalName
# entries for the keytab code to update.
spn.extend([ 'HOST/%s' % dnsname ])
msg["servicePrincipalName"] = spn
secretsdb.add(msg)
def secretsdb_setup_dns(secretsdb, names, private_dir, realm,
dnsdomain, dns_keytab_path, dnspass):
"""Add DNS specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
    :param names: Provision names object
    :param dnspass: Password for the DNS update account
"""
try:
os.unlink(os.path.join(private_dir, dns_keytab_path))
except OSError:
pass
setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
"REALM": realm,
"DNSDOMAIN": dnsdomain,
"DNS_KEYTAB": dns_keytab_path,
"DNSPASS_B64": b64encode(dnspass),
"HOSTNAME": names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def setup_secretsdb(paths, session_info, backend_credentials, lp):
"""Setup the secrets database.
    :note: This function does not handle exceptions or transactions on purpose,
        it's up to the caller to do this job.
    :param paths: ProvisionPaths object holding the secrets database path.
    :param session_info: Session info.
    :param backend_credentials: Credentials for the LDAP backend, if any.
:param lp: Loadparm context
:return: LDB handle for the created secrets database
"""
if os.path.exists(paths.secrets):
os.unlink(paths.secrets)
keytab_path = os.path.join(paths.private_dir, paths.keytab)
if os.path.exists(keytab_path):
os.unlink(keytab_path)
dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
if os.path.exists(dns_keytab_path):
os.unlink(dns_keytab_path)
path = paths.secrets
secrets_ldb = Ldb(path, session_info=session_info,
lp=lp)
secrets_ldb.erase()
secrets_ldb.load_ldif_file_add(setup_path("secrets_init.ldif"))
secrets_ldb = Ldb(path, session_info=session_info,
lp=lp)
secrets_ldb.transaction_start()
try:
secrets_ldb.load_ldif_file_add(setup_path("secrets.ldif"))
if (backend_credentials is not None and
backend_credentials.authentication_requested()):
if backend_credentials.get_bind_dn() is not None:
setup_add_ldif(secrets_ldb,
setup_path("secrets_simple_ldap.ldif"), {
"LDAPMANAGERDN": backend_credentials.get_bind_dn(),
"LDAPMANAGERPASS_B64": b64encode(backend_credentials.get_password())
})
else:
setup_add_ldif(secrets_ldb,
setup_path("secrets_sasl_ldap.ldif"), {
"LDAPADMINUSER": backend_credentials.get_username(),
"LDAPADMINREALM": backend_credentials.get_realm(),
"LDAPADMINPASS_B64": b64encode(backend_credentials.get_password())
})
return secrets_ldb
except Exception:
secrets_ldb.transaction_cancel()
raise
def setup_privileges(path, session_info, lp):
"""Setup the privileges database.
:param path: Path to the privileges database.
:param session_info: Session info.
:param credentials: Credentials
:param lp: Loadparm context
:return: LDB handle for the created secrets database
"""
if os.path.exists(path):
os.unlink(path)
privilege_ldb = Ldb(path, session_info=session_info, lp=lp)
privilege_ldb.erase()
privilege_ldb.load_ldif_file_add(setup_path("provision_privilege.ldif"))
def setup_registry(path, session_info, lp):
"""Setup the registry.
:param path: Path to the registry database
:param session_info: Session information
:param credentials: Credentials
:param lp: Loadparm context
"""
reg = samba.registry.Registry()
hive = samba.registry.open_ldb(path, session_info=session_info, lp_ctx=lp)
reg.mount_hive(hive, samba.registry.HKEY_LOCAL_MACHINE)
provision_reg = setup_path("provision.reg")
assert os.path.exists(provision_reg)
reg.diff_apply(provision_reg)
def setup_idmapdb(path, session_info, lp):
"""Setup the idmap database.
:param path: path to the idmap database
:param session_info: Session information
:param credentials: Credentials
:param lp: Loadparm context
"""
if os.path.exists(path):
os.unlink(path)
idmap_ldb = IDmapDB(path, session_info=session_info, lp=lp)
idmap_ldb.erase()
idmap_ldb.load_ldif_file_add(setup_path("idmap_init.ldif"))
return idmap_ldb
def setup_samdb_rootdse(samdb, names):
"""Setup the SamDB rootdse.
:param samdb: Sam Database handle
"""
setup_add_ldif(samdb, setup_path("provision_rootdse_add.ldif"), {
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"ROOTDN": names.rootdn,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
})
def setup_self_join(samdb, names, machinepass, dnspass,
domainsid, next_rid, invocationid,
policyguid, policyguid_dc, domainControllerFunctionality,
ntdsguid):
"""Join a host to its own domain."""
assert isinstance(invocationid, str)
if ntdsguid is not None:
ntdsguid_line = "objectGUID: %s\n"%ntdsguid
else:
ntdsguid_line = ""
setup_add_ldif(samdb, setup_path("provision_self_join.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"INVOCATIONID": invocationid,
"NETBIOSNAME": names.netbiosname,
"DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
"MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')),
"DOMAINSID": str(domainsid),
"DCRID": str(next_rid),
"SAMBA_VERSION_STRING": version,
"NTDSGUID": ntdsguid_line,
"DOMAIN_CONTROLLER_FUNCTIONALITY": str(
domainControllerFunctionality)})
setup_add_ldif(samdb, setup_path("provision_group_policy.ldif"), {
"POLICYGUID": policyguid,
"POLICYGUID_DC": policyguid_dc,
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn})
# add the NTDSGUID based SPNs
ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
names.ntdsguid = samdb.searchone(basedn=ntds_dn, attribute="objectGUID",
expression="", scope=ldb.SCOPE_BASE)
assert isinstance(names.ntdsguid, str)
# Setup fSMORoleOwner entries to point at the newly created DC entry
setup_modify_ldif(samdb, setup_path("provision_self_join_modify.ldif"), {
"DOMAINDN": names.domaindn,
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DEFAULTSITE": names.sitename,
"SERVERDN": names.serverdn,
"NETBIOSNAME": names.netbiosname,
"RIDALLOCATIONSTART": str(next_rid + 100),
"RIDALLOCATIONEND": str(next_rid + 100 + 499),
})
# This is partially Samba4 specific and should be replaced by the correct
# DNS AD-style setup
setup_add_ldif(samdb, setup_path("provision_dns_add.ldif"), {
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn,
"DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')),
"HOSTNAME" : names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def getpolicypath(sysvolpath, dnsdomain, guid):
"""Return the physical path of policy given its guid.
:param sysvolpath: Path to the sysvol folder
:param dnsdomain: DNS name of the AD domain
:param guid: The GUID of the policy
:return: A string with the complete path to the policy folder
"""
if guid[0] != "{":
guid = "{%s}" % guid
policy_path = os.path.join(sysvolpath, dnsdomain, "Policies", guid)
return policy_path
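# Illustrative example: with sysvolpath="/var/lib/samba/sysvol" (an assumed
# path), dnsdomain="samdom.example.com" and guid=DEFAULT_POLICY_GUID, this
# returns
# "/var/lib/samba/sysvol/samdom.example.com/Policies/{31B2F340-016D-11D2-945F-00C04FB984F9}".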
def create_gpo_struct(policy_path):
if not os.path.exists(policy_path):
os.makedirs(policy_path, 0775)
open(os.path.join(policy_path, "GPT.INI"), 'w').write(
"[General]\r\nVersion=0")
p = os.path.join(policy_path, "MACHINE")
if not os.path.exists(p):
os.makedirs(p, 0775)
p = os.path.join(policy_path, "USER")
if not os.path.exists(p):
os.makedirs(p, 0775)
def create_default_gpo(sysvolpath, dnsdomain, policyguid, policyguid_dc):
"""Create the default GPO for a domain
:param sysvolpath: Physical path for the sysvol folder
:param dnsdomain: DNS domain name of the AD domain
:param policyguid: GUID of the default domain policy
    :param policyguid_dc: GUID of the default domain controller policy
"""
policy_path = getpolicypath(sysvolpath,dnsdomain,policyguid)
create_gpo_struct(policy_path)
policy_path = getpolicypath(sysvolpath,dnsdomain,policyguid_dc)
create_gpo_struct(policy_path)
def setup_samdb(path, session_info, provision_backend, lp, names,
logger, domainsid, domainguid, policyguid, policyguid_dc, fill,
adminpass, krbtgtpass, machinepass, invocationid, dnspass, ntdsguid,
serverrole, am_rodc=False, dom_for_fun_level=None, schema=None,
next_rid=1000):
"""Setup a complete SAM Database.
:note: This will wipe the main SAM database file!
"""
    # Provision does not make much sense for values larger than 1000000000
# as the upper range of the rIDAvailablePool is 1073741823 and
# we don't want to create a domain that cannot allocate rids.
if next_rid < 1000 or next_rid > 1000000000:
error = "You want to run SAMBA 4 with a next_rid of %u, " % (next_rid)
error += "the valid range is %u-%u. The default is %u." % (
1000, 1000000000, 1000)
raise ProvisioningError(error)
# ATTENTION: Do NOT change these default values without discussion with the
# team and/or release manager. They have a big impact on the whole program!
domainControllerFunctionality = DS_DOMAIN_FUNCTION_2008_R2
if dom_for_fun_level is None:
dom_for_fun_level = DS_DOMAIN_FUNCTION_2003
if dom_for_fun_level > domainControllerFunctionality:
raise ProvisioningError("You want to run SAMBA 4 on a domain and forest function level which itself is higher than its actual DC function level (2008_R2). This won't work!")
domainFunctionality = dom_for_fun_level
forestFunctionality = dom_for_fun_level
# Also wipes the database
setup_samdb_partitions(path, logger=logger, lp=lp,
provision_backend=provision_backend, session_info=session_info,
names=names, serverrole=serverrole, schema=schema)
if schema is None:
schema = Schema(domainsid, schemadn=names.schemadn)
    # Load the database, but don't load the global schema and don't connect
# quite yet
samdb = SamDB(session_info=session_info, url=None, auto_connect=False,
credentials=provision_backend.credentials, lp=lp,
global_schema=False, am_rodc=am_rodc)
logger.info("Pre-loading the Samba 4 and AD schema")
# Load the schema from the one we computed earlier
samdb.set_schema(schema)
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
# And now we can connect to the DB - the schema won't be loaded from the
# DB
samdb.connect(path)
if fill == FILL_DRS:
return samdb
samdb.transaction_start()
try:
# Set the domain functionality levels onto the database.
        # Various modules (the password_hash module in particular) need
        # to know what level of AD we are emulating.
        # These will be fixed into the database via the database
        # modifications below, but we need them set from the start.
samdb.set_opaque_integer("domainFunctionality", domainFunctionality)
samdb.set_opaque_integer("forestFunctionality", forestFunctionality)
samdb.set_opaque_integer("domainControllerFunctionality",
domainControllerFunctionality)
samdb.set_domain_sid(str(domainsid))
samdb.set_invocation_id(invocationid)
logger.info("Adding DomainDN: %s" % names.domaindn)
# impersonate domain admin
admin_session_info = admin_session(lp, str(domainsid))
samdb.set_session_info(admin_session_info)
if domainguid is not None:
domainguid_line = "objectGUID: %s\n-" % domainguid
else:
domainguid_line = ""
descr = b64encode(get_domain_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_basedn.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(domainsid),
"DESCRIPTOR": descr,
"DOMAINGUID": domainguid_line
})
setup_modify_ldif(samdb, setup_path("provision_basedn_modify.ldif"), {
"DOMAINDN": names.domaindn,
"CREATTIME": str(int(time.time() * 1e7)), # seconds -> ticks
"NEXTRID": str(next_rid),
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"POLICYGUID": policyguid,
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"SAMBA_VERSION_STRING": version
})
logger.info("Adding configuration container")
descr = b64encode(get_config_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_configuration_basedn.ldif"), {
"CONFIGDN": names.configdn,
"DESCRIPTOR": descr,
})
# The LDIF here was created when the Schema object was constructed
logger.info("Setting up sam.ldb schema")
samdb.add_ldif(schema.schema_dn_add, controls=["relax:0"])
samdb.modify_ldif(schema.schema_dn_modify)
samdb.write_prefixes_from_schema()
samdb.add_ldif(schema.schema_data, controls=["relax:0"])
setup_add_ldif(samdb, setup_path("aggregate_schema.ldif"),
{"SCHEMADN": names.schemadn})
logger.info("Reopening sam.ldb with new schema")
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
samdb = SamDB(session_info=admin_session_info, auto_connect=False,
credentials=provision_backend.credentials, lp=lp,
global_schema=False, am_rodc=am_rodc)
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
samdb.connect(path)
samdb.transaction_start()
try:
samdb.invocation_id = invocationid
logger.info("Setting up sam.ldb configuration data")
descr = b64encode(get_sites_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_configuration.ldif"), {
"CONFIGDN": names.configdn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"DNSDOMAIN": names.dnsdomain,
"DOMAIN": names.domain,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"FOREST_FUNCTIONALITY": str(forestFunctionality),
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"SITES_DESCRIPTOR": descr
})
logger.info("Setting up display specifiers")
display_specifiers_ldif = read_ms_ldif(
setup_path('display-specifiers/DisplaySpecifiers-Win2k8R2.txt'))
display_specifiers_ldif = substitute_var(display_specifiers_ldif,
{"CONFIGDN": names.configdn})
check_all_substituted(display_specifiers_ldif)
samdb.add_ldif(display_specifiers_ldif)
logger.info("Adding users container")
setup_add_ldif(samdb, setup_path("provision_users_add.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Modifying users container")
setup_modify_ldif(samdb, setup_path("provision_users_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Adding computers container")
setup_add_ldif(samdb, setup_path("provision_computers_add.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Modifying computers container")
setup_modify_ldif(samdb,
setup_path("provision_computers_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Setting up sam.ldb data")
setup_add_ldif(samdb, setup_path("provision.ldif"), {
"CREATTIME": str(int(time.time() * 1e7)), # seconds -> ticks
"DOMAINDN": names.domaindn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
"RIDAVAILABLESTART": str(next_rid + 600),
"POLICYGUID_DC": policyguid_dc
})
setup_modify_ldif(samdb,
setup_path("provision_basedn_references.ldif"), {
"DOMAINDN": names.domaindn})
setup_modify_ldif(samdb,
setup_path("provision_configuration_references.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn})
if fill == FILL_FULL:
logger.info("Setting up sam.ldb users and groups")
setup_add_ldif(samdb, setup_path("provision_users.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(domainsid),
"CONFIGDN": names.configdn,
"ADMINPASS_B64": b64encode(adminpass.encode('utf-16-le')),
"KRBTGTPASS_B64": b64encode(krbtgtpass.encode('utf-16-le'))
})
logger.info("Setting up self join")
setup_self_join(samdb, names=names, invocationid=invocationid,
dnspass=dnspass,
machinepass=machinepass,
domainsid=domainsid,
next_rid=next_rid,
policyguid=policyguid,
policyguid_dc=policyguid_dc,
domainControllerFunctionality=domainControllerFunctionality,
ntdsguid=ntdsguid)
ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
names.ntdsguid = samdb.searchone(basedn=ntds_dn,
attribute="objectGUID", expression="", scope=ldb.SCOPE_BASE)
assert isinstance(names.ntdsguid, str)
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
return samdb
FILL_FULL = "FULL"
FILL_NT4SYNC = "NT4SYNC"
FILL_DRS = "DRS"
SYSVOL_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)"
POLICIES_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001301bf;;;PA)"
def set_dir_acl(path, acl, lp, domsid):
setntacl(lp, path, acl, domsid)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
setntacl(lp, os.path.join(root, name), acl, domsid)
for name in dirs:
setntacl(lp, os.path.join(root, name), acl, domsid)
def set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp):
"""Set ACL on the sysvol/<dnsname>/Policies folder and the policy
folders beneath.
:param sysvol: Physical path for the sysvol folder
:param dnsdomain: The DNS name of the domain
:param domainsid: The SID of the domain
:param domaindn: The DN of the domain (ie. DC=...)
:param samdb: An LDB object on the SAM db
:param lp: an LP object
"""
# Set ACL for GPO root folder
root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
setntacl(lp, root_policy_path, POLICIES_ACL, str(domainsid))
res = samdb.search(base="CN=Policies,CN=System,%s"%(domaindn),
attrs=["cn", "nTSecurityDescriptor"],
expression="", scope=ldb.SCOPE_ONELEVEL)
for policy in res:
acl = ndr_unpack(security.descriptor,
str(policy["nTSecurityDescriptor"])).as_sddl()
policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
set_dir_acl(policy_path, dsacl2fsacl(acl, str(domainsid)), lp,
str(domainsid))
def setsysvolacl(samdb, netlogon, sysvol, gid, domainsid, dnsdomain, domaindn,
lp):
"""Set the ACL for the sysvol share and the subfolders
:param samdb: An LDB object on the SAM db
:param netlogon: Physical path for the netlogon folder
:param sysvol: Physical path for the sysvol folder
:param gid: The GID of the "Domain adminstrators" group
:param domainsid: The SID of the domain
:param dnsdomain: The DNS name of the domain
:param domaindn: The DN of the domain (ie. DC=...)
"""
try:
os.chown(sysvol, -1, gid)
except OSError:
canchown = False
else:
canchown = True
# Set the SYSVOL_ACL on the sysvol folder and subfolder (first level)
setntacl(lp,sysvol, SYSVOL_ACL, str(domainsid))
for root, dirs, files in os.walk(sysvol, topdown=False):
for name in files:
if canchown:
os.chown(os.path.join(root, name), -1, gid)
setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid))
for name in dirs:
if canchown:
os.chown(os.path.join(root, name), -1, gid)
setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid))
# Set acls on Policy folder and policies folders
set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp)
def provision(logger, session_info, credentials, smbconf=None,
targetdir=None, samdb_fill=FILL_FULL, realm=None, rootdn=None,
domaindn=None, schemadn=None, configdn=None, serverdn=None,
domain=None, hostname=None, hostip=None, hostip6=None, domainsid=None,
next_rid=1000, adminpass=None, ldapadminpass=None, krbtgtpass=None,
domainguid=None, policyguid=None, policyguid_dc=None,
invocationid=None, machinepass=None, ntdsguid=None, dnspass=None,
root=None, nobody=None, users=None, wheel=None, backup=None, aci=None,
serverrole=None, dom_for_fun_level=None, ldap_backend_extra_port=None,
ldap_backend_forced_uri=None, backend_type=None, sitename=None,
ol_mmr_urls=None, ol_olc=None, setup_ds_path=None, slapd_path=None,
nosync=False, ldap_dryrun_mode=False, useeadb=False, am_rodc=False,
lp=None):
"""Provision samba4
:note: caution, this wipes all existing data!
"""
if domainsid is None:
domainsid = security.random_sid()
else:
domainsid = security.dom_sid(domainsid)
# create/adapt the group policy GUIDs
# Default GUID for default policy are described at
# "How Core Group Policy Works"
# http://technet.microsoft.com/en-us/library/cc784268%28WS.10%29.aspx
if policyguid is None:
policyguid = DEFAULT_POLICY_GUID
policyguid = policyguid.upper()
if policyguid_dc is None:
policyguid_dc = DEFAULT_DC_POLICY_GUID
policyguid_dc = policyguid_dc.upper()
if adminpass is None:
adminpass = samba.generate_random_password(12, 32)
if krbtgtpass is None:
krbtgtpass = samba.generate_random_password(128, 255)
if machinepass is None:
machinepass = samba.generate_random_password(128, 255)
if dnspass is None:
dnspass = samba.generate_random_password(128, 255)
if ldapadminpass is None:
        # Make a new, random password between Samba and its LDAP server
        ldapadminpass = samba.generate_random_password(128, 255)
if backend_type is None:
backend_type = "ldb"
sid_generator = "internal"
if backend_type == "fedora-ds":
sid_generator = "backend"
root_uid = findnss_uid([root or "root"])
nobody_uid = findnss_uid([nobody or "nobody"])
users_gid = findnss_gid([users or "users", 'users', 'other', 'staff'])
if wheel is None:
wheel_gid = findnss_gid(["wheel", "adm"])
else:
wheel_gid = findnss_gid([wheel])
try:
bind_gid = findnss_gid(["bind", "named"])
except KeyError:
bind_gid = None
if targetdir is not None:
smbconf = os.path.join(targetdir, "etc", "smb.conf")
elif smbconf is None:
smbconf = samba.param.default_path()
if not os.path.exists(os.path.dirname(smbconf)):
os.makedirs(os.path.dirname(smbconf))
# only install a new smb.conf if there isn't one there already
if os.path.exists(smbconf):
# if Samba Team members can't figure out the weird errors
# loading an empty smb.conf gives, then we need to be smarter.
# Pretend it just didn't exist --abartlet
data = open(smbconf, 'r').read()
data = data.lstrip()
if data is None or data == "":
make_smbconf(smbconf, hostname, domain, realm,
serverrole, targetdir, sid_generator, useeadb,
lp=lp)
else:
make_smbconf(smbconf, hostname, domain, realm, serverrole,
targetdir, sid_generator, useeadb, lp=lp)
if lp is None:
lp = samba.param.LoadParm()
lp.load(smbconf)
names = guess_names(lp=lp, hostname=hostname, domain=domain,
dnsdomain=realm, serverrole=serverrole, domaindn=domaindn,
configdn=configdn, schemadn=schemadn, serverdn=serverdn,
sitename=sitename)
paths = provision_paths_from_lp(lp, names.dnsdomain)
paths.bind_gid = bind_gid
if hostip is None:
logger.info("Looking up IPv4 addresses")
hostips = samba.interface_ips(lp, False)
if len(hostips) == 0:
logger.warning("No external IPv4 address has been found. Using loopback.")
hostip = '127.0.0.1'
else:
hostip = hostips[0]
if len(hostips) > 1:
logger.warning("More than one IPv4 address found. Using %s.",
hostip)
if serverrole is None:
serverrole = lp.get("server role")
assert serverrole in ("domain controller", "member server", "standalone")
if invocationid is None:
invocationid = str(uuid.uuid4())
if not os.path.exists(paths.private_dir):
os.mkdir(paths.private_dir)
if not os.path.exists(os.path.join(paths.private_dir, "tls")):
os.mkdir(os.path.join(paths.private_dir, "tls"))
ldapi_url = "ldapi://%s" % urllib.quote(paths.s4_ldapi_path, safe="")
schema = Schema(domainsid, invocationid=invocationid,
schemadn=names.schemadn)
if backend_type == "ldb":
provision_backend = LDBBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger)
elif backend_type == "existing":
provision_backend = ExistingBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger,
ldap_backend_forced_uri=ldap_backend_forced_uri)
elif backend_type == "fedora-ds":
provision_backend = FDSBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger, domainsid=domainsid,
schema=schema, hostname=hostname, ldapadminpass=<PASSWORD>,
slapd_path=slapd_path,
ldap_backend_extra_port=ldap_backend_extra_port,
ldap_dryrun_mode=ldap_dryrun_mode, root=root,
setup_ds_path=setup_ds_path,
ldap_backend_forced_uri=ldap_backend_forced_uri)
elif backend_type == "openldap":
provision_backend = OpenLDAPBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger, domainsid=domainsid,
schema=schema, hostname=hostname, ldapadminpass=<PASSWORD>,
slapd_path=slapd_path,
ldap_backend_extra_port=ldap_backend_extra_port,
ldap_dryrun_mode=ldap_dryrun_mode, ol_mmr_urls=ol_mmr_urls,
nosync=nosync,
ldap_backend_forced_uri=ldap_backend_forced_uri)
else:
raise ValueError("Unknown LDAP backend type selected")
provision_backend.init()
provision_backend.start()
# only install a new shares config db if there is none
if not os.path.exists(paths.shareconf):
logger.info("Setting up share.ldb")
share_ldb = Ldb(paths.shareconf, session_info=session_info,
lp=lp)
share_ldb.load_ldif_file_add(setup_path("share.ldif"))
logger.info("Setting up secrets.ldb")
secrets_ldb = setup_secretsdb(paths,
session_info=session_info,
backend_credentials=provision_backend.secrets_credentials, lp=lp)
try:
logger.info("Setting up the registry")
setup_registry(paths.hklm, session_info,
lp=lp)
logger.info("Setting up the privileges database")
setup_privileges(paths.privilege, session_info, lp=lp)
logger.info("Setting up idmap db")
idmap = setup_idmapdb(paths.idmapdb,
session_info=session_info, lp=lp)
logger.info("Setting up SAM db")
samdb = setup_samdb(paths.samdb, session_info,
provision_backend, lp, names, logger=logger,
domainsid=domainsid, schema=schema, domainguid=domainguid,
policyguid=policyguid, policyguid_dc=policyguid_dc,
fill=samdb_fill, adminpass=<PASSWORD>, krbtgtpass=krbtgtpass,
invocationid=invocationid, machinepass=machinepass,
dnspass=dnspass, ntdsguid=ntdsguid, serverrole=serverrole,
dom_for_fun_level=dom_for_fun_level, am_rodc=am_rodc,
next_rid=next_rid)
if serverrole == "domain controller":
if paths.netlogon is None:
logger.info("Existing smb.conf does not have a [netlogon] share, but you are configuring a DC.")
logger.info("Please either remove %s or see the template at %s" %
(paths.smbconf, setup_path("provision.smb.conf.dc")))
assert paths.netlogon is not None
if paths.sysvol is None:
logger.info("Existing smb.conf does not have a [sysvol] share, but you"
" are configuring a DC.")
logger.info("Please either remove %s or see the template at %s" %
(paths.smbconf, setup_path("provision.smb.conf.dc")))
assert paths.sysvol is not None
if not os.path.isdir(paths.netlogon):
os.makedirs(paths.netlogon, 0755)
if samdb_fill == FILL_FULL:
setup_name_mappings(samdb, idmap, str(domainsid), names.domaindn,
root_uid=root_uid, nobody_uid=nobody_uid,
users_gid=users_gid, wheel_gid=wheel_gid)
if serverrole == "domain controller":
# Set up group policies (domain policy and domain controller
# policy)
create_default_gpo(paths.sysvol, names.dnsdomain, policyguid,
policyguid_dc)
setsysvolacl(samdb, paths.netlogon, paths.sysvol, wheel_gid,
domainsid, names.dnsdomain, names.domaindn, lp)
logger.info("Setting up sam.ldb rootDSE marking as synchronized")
setup_modify_ldif(samdb, setup_path("provision_rootdse_modify.ldif"))
secretsdb_self_join(secrets_ldb, domain=names.domain,
realm=names.realm, dnsdomain=names.dnsdomain,
netbiosname=names.netbiosname, domainsid=domainsid,
machinepass=<PASSWORD>, secure_channel_type=SEC_CHAN_BDC)
# Now set up the right msDS-SupportedEncryptionTypes into the DB
# In future, this might be determined from some configuration
kerberos_enctypes = str(ENC_ALL_TYPES)
try:
msg = ldb.Message(ldb.Dn(samdb,
samdb.searchone("distinguishedName",
expression="samAccountName=%s$" % names.netbiosname,
scope=ldb.SCOPE_SUBTREE)))
msg["msDS-SupportedEncryptionTypes"] = ldb.MessageElement(
elements=kerberos_enctypes, flags=ldb.FLAG_MOD_REPLACE,
name="msDS-SupportedEncryptionTypes")
samdb.modify(msg)
except ldb.LdbError, (enum, estr):
if enum != ldb.ERR_NO_SUCH_ATTRIBUTE:
# It might be that this attribute does not exist in this schema
raise
if serverrole == "domain controller":
secretsdb_setup_dns(secrets_ldb, names,
paths.private_dir, realm=names.realm,
dnsdomain=names.dnsdomain,
dns_keytab_path=paths.dns_keytab, dnspass=dnspass)
domainguid = samdb.searchone(basedn=domaindn,
attribute="objectGUID")
assert isinstance(domainguid, str)
# Only make a zone file on the first DC, it should be
# replicated with DNS replication
create_zone_file(lp, logger, paths, targetdir,
dnsdomain=names.dnsdomain, hostip=hostip, hostip6=hostip6,
hostname=names.hostname, realm=names.realm,
domainguid=domainguid, ntdsguid=names.ntdsguid)
create_named_conf(paths, realm=names.realm,
dnsdomain=names.dnsdomain, private_dir=paths.private_dir)
create_named_txt(paths.namedtxt,
realm=names.realm, dnsdomain=names.dnsdomain,
private_dir=paths.private_dir,
keytab_name=paths.dns_keytab)
logger.info("See %s for an example configuration include file for BIND", paths.namedconf)
logger.info("and %s for further documentation required for secure DNS "
"updates", paths.namedtxt)
lastProvisionUSNs = get_last_provision_usn(samdb)
maxUSN = get_max_usn(samdb, str(names.rootdn))
if lastProvisionUSNs is not None:
update_provision_usn(samdb, 0, maxUSN, 1)
else:
set_provision_usn(samdb, 0, maxUSN)
create_krb5_conf(paths.krb5conf,
dnsdomain=names.dnsdomain, hostname=names.hostname,
realm=names.realm)
logger.info("A Kerberos configuration suitable for Samba 4 has been "
"generated at %s", paths.krb5conf)
if serverrole == "domain controller":
create_dns_update_list(lp, logger, paths)
provision_backend.post_setup()
provision_backend.shutdown()
create_phpldapadmin_config(paths.phpldapadminconfig,
ldapi_url)
except Exception:
secrets_ldb.transaction_cancel()
raise
# Now commit the secrets.ldb to disk
secrets_ldb.transaction_commit()
# the commit creates the dns.keytab, now chown it
dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
if os.path.isfile(dns_keytab_path) and paths.bind_gid is not None:
try:
os.chmod(dns_keytab_path, 0640)
os.chown(dns_keytab_path, -1, paths.bind_gid)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.info("Failed to chown %s to bind gid %u",
dns_keytab_path, paths.bind_gid)
logger.info("Please install the phpLDAPadmin configuration located at %s into /etc/phpldapadmin/config.php",
paths.phpldapadminconfig)
logger.info("Once the above files are installed, your Samba4 server will be ready to use")
logger.info("Server Role: %s" % serverrole)
logger.info("Hostname: %s" % names.hostname)
logger.info("NetBIOS Domain: %s" % names.domain)
logger.info("DNS Domain: %s" % names.dnsdomain)
logger.info("DOMAIN SID: %s" % str(domainsid))
if samdb_fill == FILL_FULL:
logger.info("Admin password: %s" % adminpass)
if provision_backend.type is not "ldb":
if provision_backend.credentials.get_bind_dn() is not None:
logger.info("LDAP Backend Admin DN: %s" %
provision_backend.credentials.get_bind_dn())
else:
logger.info("LDAP Admin User: %s" %
provision_backend.credentials.get_username())
logger.info("LDAP Admin Password: %s" %
provision_backend.credentials.get_password())
if provision_backend.slapd_command_escaped is not None:
# now display slapd_command_file.txt to show how slapd must be
# started next time
logger.info("Use later the following commandline to start slapd, then Samba:")
logger.info(provision_backend.slapd_command_escaped)
logger.info("This slapd-Commandline is also stored under: %s/ldap_backend_startup.sh",
provision_backend.ldapdir)
result = ProvisionResult()
result.domaindn = domaindn
result.paths = paths
result.lp = lp
result.samdb = samdb
return result
def provision_become_dc(smbconf=None, targetdir=None,
realm=None, rootdn=None, domaindn=None, schemadn=None, configdn=None,
serverdn=None, domain=None, hostname=None, domainsid=None,
adminpass=<PASSWORD>, krbtgtpass=None, domainguid=None, policyguid=None,
policyguid_dc=None, invocationid=None, machinepass=None, dnspass=None,
root=None, nobody=None, users=None, wheel=None, backup=None,
serverrole=None, ldap_backend=None, ldap_backend_type=None,
sitename=None, debuglevel=1):
logger = logging.getLogger("provision")
samba.set_debug_level(debuglevel)
res = provision(logger, system_session(), None,
smbconf=smbconf, targetdir=targetdir, samdb_fill=FILL_DRS,
realm=realm, rootdn=rootdn, domaindn=domaindn, schemadn=schemadn,
configdn=configdn, serverdn=serverdn, domain=domain,
hostname=hostname, hostip="127.0.0.1", domainsid=domainsid,
machinepass=machinepass, serverrole="domain controller",
sitename=sitename)
res.lp.set("debuglevel", str(debuglevel))
return res
def create_phpldapadmin_config(path, ldapi_uri):
"""Create a PHP LDAP admin configuration file.
    :param path: Path to write the configuration to.
    :param ldapi_uri: LDAPI URI of the LDAP server that phpLDAPadmin should use.
"""
setup_file(setup_path("phpldapadmin-config.php"), path,
{"S4_LDAPI_URI": ldapi_uri})
def create_zone_file(lp, logger, paths, targetdir, dnsdomain,
hostip, hostip6, hostname, realm, domainguid,
ntdsguid):
"""Write out a DNS zone file, from the info in the current database.
:param paths: paths object
:param dnsdomain: DNS Domain name
:param domaindn: DN of the Domain
:param hostip: Local IPv4 IP
:param hostip6: Local IPv6 IP
:param hostname: Local hostname
:param realm: Realm name
:param domainguid: GUID of the domain.
:param ntdsguid: GUID of the hosts nTDSDSA record.
"""
assert isinstance(domainguid, str)
if hostip6 is not None:
hostip6_base_line = " IN AAAA " + hostip6
hostip6_host_line = hostname + " IN AAAA " + hostip6
gc_msdcs_ip6_line = "gc._msdcs IN AAAA " + hostip6
else:
hostip6_base_line = ""
hostip6_host_line = ""
gc_msdcs_ip6_line = ""
if hostip is not None:
hostip_base_line = " IN A " + hostip
hostip_host_line = hostname + " IN A " + hostip
gc_msdcs_ip_line = "gc._msdcs IN A " + hostip
else:
hostip_base_line = ""
hostip_host_line = ""
gc_msdcs_ip_line = ""
dns_dir = os.path.dirname(paths.dns)
try:
shutil.rmtree(dns_dir, True)
except OSError:
pass
os.mkdir(dns_dir, 0775)
# we need to freeze the zone while we update the contents
if targetdir is None:
rndc = ' '.join(lp.get("rndc command"))
os.system(rndc + " freeze " + lp.get("realm"))
setup_file(setup_path("provision.zone"), paths.dns, {
"HOSTNAME": hostname,
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"HOSTIP_BASE_LINE": hostip_base_line,
"HOSTIP_HOST_LINE": hostip_host_line,
"DOMAINGUID": domainguid,
"DATESTRING": time.strftime("%Y%m%d%H"),
"DEFAULTSITE": DEFAULTSITE,
"NTDSGUID": ntdsguid,
"HOSTIP6_BASE_LINE": hostip6_base_line,
"HOSTIP6_HOST_LINE": hostip6_host_line,
"GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
"GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
})
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
# and the SPN update list
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
if paths.bind_gid is not None:
try:
os.chown(dns_dir, -1, paths.bind_gid)
os.chown(paths.dns, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(dns_dir, 0775)
os.chmod(paths.dns, 0664)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
dns_dir, paths.bind_gid))
if targetdir is None:
os.system(rndc + " unfreeze " + lp.get("realm"))
def create_dns_update_list(lp, logger, paths):
"""Write out a dns_update_list file"""
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
def create_named_conf(paths, realm, dnsdomain,
private_dir):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param paths: all paths
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.conf"), paths.namedconf, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"ZONE_FILE": paths.dns,
"REALM_WC": "*." + ".".join(realm.split(".")[1:]),
"NAMED_CONF": paths.namedconf,
"NAMED_CONF_UPDATE": paths.namedconf_update
})
setup_file(setup_path("named.conf.update"), paths.namedconf_update)
def create_named_txt(path, realm, dnsdomain, private_dir,
keytab_name):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.txt"), path, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"DNS_KEYTAB": keytab_name,
"DNS_KEYTAB_ABS": os.path.join(private_dir, keytab_name),
"PRIVATE_DIR": private_dir
})
def create_krb5_conf(path, dnsdomain, hostname, realm):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param dnsdomain: DNS Domain name
:param hostname: Local hostname
:param realm: Realm name
"""
setup_file(setup_path("krb5.conf"), path, {
"DNSDOMAIN": dnsdomain,
"HOSTNAME": hostname,
"REALM": realm,
})
class ProvisioningError(Exception):
"""A generic provision error."""
def __init__(self, value):
self.value = value
def __str__(self):
return "ProvisioningError: " + self.value
class InvalidNetbiosName(Exception):
"""A specified name was not a valid NetBIOS name."""
def __init__(self, name):
super(InvalidNetbiosName, self).__init__(
"The name '%r' is not a valid NetBIOS name" % name)
| 1.789063 | 2 |
oc/data.py | wearelumenai/flowclus | 0 | 12794537 | import flowsim.client as c
get_chunk = c.get_chunk(port=8080)
| 1.320313 | 1 |
sal_ui.py | dvneeseele/SimpleAnimeLibrary | 0 | 12794538 | <filename>sal_ui.py<gh_stars>0
#############################################################################
# dvneeseele
#############################################################################
import os
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QFont, QIcon
from PyQt5.QtWidgets import (QMainWindow, QAbstractItemView, QToolBar, QSplitter, QHBoxLayout, QWidget, QAction, QStackedWidget, QListWidget, QTableWidget, QTableWidgetItem, QTableView)
class salUI(object):
def __init__(self):
        super(salUI, self).__init__()
def setupUI(self, MainWindow):
MainWindow.setWindowIcon(QIcon("icons/saldb_red"))
MainWindow.setWindowTitle("Simple Anime Library | ヽ( ´ー`)ノ")
# setup menubar
menubar = MainWindow.menuBar()
self.mb_file = menubar.addMenu("File")
self.mb_edit = menubar.addMenu("Edit")
self.mb_view = menubar.addMenu("View")
self.mb_help = menubar.addMenu("Help")
# menubar file menu
self.mb_newAction = QAction("New Entry", MainWindow)
self.mb_file.addAction(self.mb_newAction)
# menubar edit menu
self.mb_editEntryAction = QAction("Edit Entry", MainWindow)
self.mb_edit.addAction(self.mb_editEntryAction)
# menubar view menu
# TODO
# menubar help menu
# TODO
# Toolbar
self.toolbar = QToolBar(MainWindow)
self.toolbar.setIconSize(QSize(40, 40))
MainWindow.addToolBar(self.toolbar)
self.addnewAction = QAction(QIcon("icons/add_1.png"),"Create New Entry", MainWindow)
self.addnewAction.setShortcut('Ctrl+N')
self.toolbar.addAction(self.addnewAction)
self.deleteAction = QAction(QIcon("icons/delete_1.png") ,"Delete Entry", MainWindow)
self.deleteAction.setShortcut("Ctrl+D")
self.toolbar.addAction(self.deleteAction)
self.editAction = QAction(QIcon("icons/edit.png") ,"Edit Entry", MainWindow)
self.editAction.setShortcut("Ctrl+E")
self.toolbar.addAction(self.editAction)
self.toolbar.addSeparator()
self.findAction = QAction(QIcon("icons/find.png") ,"Search", MainWindow)
self.findAction.setShortcut("Ctrl+F")
self.toolbar.addAction(self.findAction)
self.queryAction = QAction(QIcon("icons/filter.png") ,"Filter/Sort", MainWindow)
self.queryAction.setShortcut("Ctrl+Alt+Q")
self.toolbar.addAction(self.queryAction)
self.toolbar.addSeparator()
self.settingsAction = QAction(QIcon("icons/settings.png") ,"App Settings", MainWindow)
self.settingsAction.setShortcut("Ctrl+Shift+S")
self.toolbar.addAction(self.settingsAction)
self.infoAction = QAction(QIcon("icons/info.png") ,"App Info", MainWindow)
self.toolbar.addAction(self.infoAction)
#############################################################################################################
self.centralWidget = QWidget(MainWindow)
self.centralWidget.setObjectName('central')
self.splitter = QSplitter(self.centralWidget)
self.splitter.setOrientation(Qt.Horizontal)
self.splitter.setStretchFactor(0, 25)
self.splitter.setStretchFactor(1, 75)
# sidebar (QListWidget)
self.sidebar = QListWidget(self.splitter)
self.sidebar.setObjectName('sidebar')
self.sidebar.addItem('Fin.')
self.sidebar.addItem('Planned')
self.sidebar.addItem('Ongoing')
self.sidebar.addItem('Current')
self.sidebar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# [nevermind lol] pretty sure i dont need this here, it should be created in the backend and added to the stack.
self.watchListTable = QTableWidget()
self.watchListTable.setObjectName('watchListTable')
self.watchListTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.watchListTable.setSelectionMode(QAbstractItemView.SingleSelection)
self.watchListTable.setContextMenuPolicy(Qt.CustomContextMenu)
self.watchListTable.customContextMenuRequested.connect(self.tableContextMenu)
self.watchListTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.watchListTable.setFont(QFont('Arial', 14))
self.watchListTable.setWordWrap(False)
#self.watchListTable.setTextAlignment(Qt.AlignCenter)
self.watchListTable.setColumnCount(7)
self.watchListTable.setHorizontalHeaderLabels(["Art", "Title", "English Title", "SUB/DUB", "Start Date" , "Completion Date", "Series Type"])
self.watchListTable.verticalHeader().setDefaultSectionSize(140)
self.watchListTable.horizontalHeader().setDefaultSectionSize(120)
# stackwidget to switch contents of list catagories
self.tableStack = QStackedWidget(self.splitter)
self.tableStack.setObjectName('tablestack')
self.tableStack.addWidget(self.watchListTable)
# add widgets to splitter
self.splitter.addWidget(self.sidebar)
self.splitter.addWidget(self.tableStack)
self.splitter.setSizes([50, 650])
########################################################################################################
########################################################################################################
self.boxLayout = QHBoxLayout()
self.centralWidget.setLayout(self.boxLayout)
MainWindow.setCentralWidget(self.centralWidget)
self.boxLayout.addWidget(self.splitter)
MainWindow.show()
| 2.28125 | 2 |
PhotoManagementSystem/PhotoManager/Library/facep.py | 39M/PhotoTheater | 1 | 12794539 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
import requests
API_KEY = '<KEY>'
API_SECRET = '<KEY>'
API_URL = 'http://apicn.faceplusplus.com'
def detect(path):
data = {
'api_key': API_KEY,
'api_secret': API_SECRET,
}
files = {
'img': open(path, 'rb'),
}
r = requests.post(API_URL + '/detection/detect',
data=data,
files=files)
try:
face_id = r.json()["face"][0]["face_id"]
data = {
'api_key': API_KEY,
'api_secret': API_SECRET,
'face_id': face_id
}
result = requests.post(API_URL + '/detection/landmark',
data=data)
return result.json()
except:
return -1
# detect(u'source.jpg')
| 2.671875 | 3 |
grasshopper/__init__.py | aholyoke/grasshopper | 0 | 12794540 | from .framework import Framework
| 1.117188 | 1 |
bot.py | joonsauce/sus-bot | 1 | 12794541 | <reponame>joonsauce/sus-bot<gh_stars>1-10
# links the main bot file with the other feature files
# links to the redesigned help command
from help import *
# links to the random sus meme feature
from redditAPI import *
# links to the roll features
from roll import *
# links to the various bot settings
from setting import *
# links to the different "sus" features
from sus import *
# links to the soundboard features
from music import *
# test command; used for test purposes to debug things; just a short message for now but you can use it for whatever other purpose you want
@bot.command()
async def test(ctx):
# sends the message in the quote
await ctx.send("You think you're funny huh?")
# makes the bot run with the bot token
bot.run(bot_token)
| 2.1875 | 2 |
kostalpiko/const.py | rcasula/KostalPyko | 0 | 12794542 | <reponame>rcasula/KostalPyko
BASE_INDICES = {
"current_power": 0,
"total_energy": 1,
"daily_energy": 2,
"string1_voltage": 3,
"l1_voltage": 4,
"string1_current": 5,
"l1_power": 6,
}
SINGLE_STRING_INDICES = {
**BASE_INDICES,
"status": 7
}
DOUBLE_STRING_INDICES = {
**BASE_INDICES,
"string2_voltage": 7,
"l2_voltage": 8,
"string2_current": 9,
"l2_power": 10,
"status": 11
}
TRIPLE_STRING_INDICES = {
**BASE_INDICES,
"string2_voltage": 7,
"l2_voltage": 8,
"string2_current": 9,
"l2_power": 10,
"string3_voltage": 11,
"l3_voltage": 12,
"string3_current": 13,
"l3_power": 14,
"status": 15
} | 1.453125 | 1 |
bokeh/util/api.py | areaweb/bokeh | 1 | 12794543 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide functions for declaring Bokeh API information.
Within the Bokeh codebase, functions, classes, methods, and properties may
be defined to be "public" or "internal", as well as note what Bokeh version
the object was first introduced in.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
logger = logging.getLogger(__name__)
# This one module is exempted from this :)
# from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.string import nice_join, format_docstring
from .future import wraps
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
INTERNAL = 'internal'
PUBLIC = 'public'
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
def internal(version):
    ''' Declare an object to be ``'internal'``, introduced in ``version``.
This decorator annotates a function or class with information about what
version it was first introduced in, as well as that it is part of the
internal API. Specifically, the decorated object will have attributes:
.. code-block:: python
__bkversion__ = version
__bklevel__ = {internal}
Args:
version (tuple) :
A version tuple ``(x,y,z)`` stating what version this object was
introduced.
Returns:
Class or Function
'''
return _access(version, 'internal')
internal.__doc__ = format_docstring(internal.__doc__, internal=repr(INTERNAL))
def is_declared(obj):
'''
Args:
obj (object) :
The function, class, method, or property to test
Returns:
bool
'''
return hasattr(obj, '__bklevel__') and hasattr(obj, '__bkversion__')
def is_level(obj, level):
'''
Args:
obj (object) :
The function, class, method, or property to declare a level for
level ({public} or {internal})
Whether to declare the object public or internal
Returns:
bool
'''
if level not in _LEVELS:
raise ValueError("Unknown API level %r, expected %s" % (level, nice_join(_LEVELS)))
return obj.__bklevel__ == level
is_level.__doc__ = format_docstring(is_level.__doc__, public=repr(PUBLIC), internal=repr(INTERNAL))
def is_version(obj, version):
'''
Args:
obj (object) :
The function, class, method, or property to declare a version for
Returns:
bool
'''
return obj.__bkversion__ == version
def public(version):
''' Declare an object to be ``'public'``, introduced in ``version``.
This decorator annotates a function or class with information about what
version it was first introduced in, as well as that it is part of the
    public API. Specifically, the decorated object will have attributes:
.. code-block:: python
__bkversion__ = version
__bklevel__ = {public}
Args:
version (tuple) :
A version tuple ``(x,y,z)`` stating what version this object was
introduced.
Returns:
Class or Function
'''
return _access(version, 'public')
public.__doc__ = format_docstring(public.__doc__, public=repr(PUBLIC))
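# A minimal usage sketch (hypothetical function, not part of Bokeh itself): the
# decorators only attach ``__bkversion__`` and ``__bklevel__`` to the decorated
# object, which ``is_declared``/``is_level``/``is_version`` can then inspect.
#
#     @public((1, 0, 0))
#     def some_helper():
#         pass
#
#     assert is_declared(some_helper)
#     assert is_level(some_helper, PUBLIC)
#     assert is_version(some_helper, (1, 0, 0))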
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_LEVELS = [PUBLIC, INTERNAL]
def _access(version, level):
''' Declare an object to be ``{{ level }}``, introduced in ``version``.
This generic decorator annotates a function or class with information about
what version it was first introduced in, as well as whether it is a public
or internal API level. Specifically, the decorated object will have
attributes:
.. code-block:: python
__bkversion__ = version
__bklevel__ = level
Args:
version (tuple) :
A version tuple ``(x,y,z)`` stating what version this object was
introduced.
level: (str)
Whether this object is ``'public'`` or ``'internal'``
Returns:
Class or Function
'''
assert level in _LEVELS
def decorator(obj):
# Keep track of how many public/internal things there are declared
# in a module so we can make sure api tests are comprehensive
mod = _get_module(obj)
_increment_api_count(mod, level)
# If we are decorating a class
if isinstance(obj, type):
obj.__bkversion__ = version
obj.__bklevel__ = level
return obj
# Otherwise we are decorating a function or method
@wraps(obj)
def wrapper(*args, **kw):
return obj(*args, **kw)
wrapper.__bkversion__ = version
wrapper.__bklevel__ = level
return wrapper
return decorator
def _get_module(obj):
''' Given an function, class, method, or property, return the module
that is was defined in.
This function is written with the usages of the Bokeh codebase in
mind, and may not work in general
'''
import sys
if isinstance(obj, property):
modname = obj.fget.__module__
else:
modname = obj.__module__
return sys.modules[modname]
def _increment_api_count(mod, level):
''' Updates the __bkapi__ dict on a module, creating a new one if necessary
'''
if not hasattr(mod, '__bkapi__'):
mod.__bkapi__ = {PUBLIC: 0, INTERNAL:0}
mod.__bkapi__[level] += 1
| 1.671875 | 2 |
Server/app/docs/v2/admin/excel/__init__.py | moreal/DMS-Backend | 27 | 12794544 | <filename>Server/app/docs/v2/admin/excel/__init__.py
from app.docs.v2 import jwt_header
def generate_excel_doc(type):
return {
        'tags': ['[Admin] Application Info'],
        'description': 'Downloads the {} application information.'.format(type),
        'parameters': [jwt_header],
        'responses': {
            '200': {
                'description': 'Responds with an Excel file containing the {} application information, along with a Cache-Control: no-cache header.'.format(type)
            },
            '403': {
                'description': 'Permission denied'
            }
}
}
| 2.140625 | 2 |
imgprocess.py | Xanadu12138/DSCN-superpixels | 4 | 12794545 | # Functions of img processing.
from functools import total_ordering
import config
import numpy as np
import copy
import torch
import cv2
from skimage.color import rgb2gray
from XCSLBP import XCSLBP
def extractPixelBlock(originalImg, labels):
'''
input_param:
        originalImg: Original pixel matrix of the input img, squeezed to 2 dimensions. np.ndarray
labels: label matrix of input img. np.ndarray
output_param:
        pixelBlockList: a list containing one pixel block per label, each incorporating the pixels that share that label.
'''
    # Copy labels because the max() function alters the dimensions of its parameter
newLabels = copy.deepcopy(labels)
maxLabel = max(newLabels)
pixelBlockList = []
labels = labels.reshape(-1,1)
blankBlock = np.array([255, 255, 255])
for i in range(maxLabel + 1):
# Uncomment line24 and comment line25 to visualize pixelBlock.
# pixelBlock = [pixel if label == i else blankBlock for pixel, label in zip(originalImg, labels)]
pixelBlock = [pixel if label == i else config.blankBlock for pixel, label in zip(originalImg, labels)]
pixelBlock = np.array(pixelBlock)
pixelBlock = pixelBlock.reshape(config.imgSize[0], config.imgSize[1], -1)
pixelBlockList.append(pixelBlock)
return pixelBlockList
def extractFeature(pixelBlockList):
'''
input_param:
        pixelBlockList: A list containing all pixel blocks.
output_param:
        featureList: A list containing each block's feature vector: the mean value of the 3 color channels plus the mean position.
'''
featureList = []
for i in range(len(pixelBlockList)):
pixelList = []
locationList = []
for y in range(len(pixelBlockList[0])):
for x in range(len(pixelBlockList[1])):
if (pixelBlockList[i][y][x] != config.blankBlock).any():
pixelList.append(list(pixelBlockList[i][y][x]))
locationList.append((x,y))
colorFeature = np.mean(np.array(pixelList), axis=0)
locationFeature = np.mean(np.array(locationList), axis=0)
features = np.append(colorFeature, locationFeature)
featureList.append(features)
featureList = np.array(featureList)
return featureList
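# Rough usage sketch for the two helpers above (names are illustrative): `flatImg`
# is the RGB image reshaped to (H*W, 3) and `labels` is the per-pixel superpixel
# label vector produced by the clustering step.
#
#   pixelBlockList = extractPixelBlock(flatImg, labels)
#   featureList = extractFeature(pixelBlockList)  # shape: (numLabels, 5)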
# Optimized version
def regionColorFeatures(img, labels):
'''
input_param:
img: img matrix. torch.tensor
labels: Kmeans clustering labels. torch.tensor
output_param:
        colorFeatureList: A list containing each region's feature: the mean red, green, blue and gray values.
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
colorFeatureList = []
grayFrame = torch.tensor(rgb2gray(img))
redFrame = img[:, :, 0]
greenFrame = img[:, :, 1]
blueFrame = img[:, :, 2]
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
graySpLocal = torch.mean(grayFrame[f].float())
redSpLocal = torch.mean(redFrame[f].float())
greenSpLocal = torch.mean(greenFrame[f].float())
blueSpLocal = torch.mean(blueFrame[f].float())
colorFeature = [redSpLocal, greenSpLocal, blueSpLocal, graySpLocal]
colorFeatureList.append(colorFeature)
colorFeatureList = torch.tensor(colorFeatureList)
return colorFeatureList
def regionTextureFeatures(img, labels):
'''
input_param:
img: CV2.imread
labels
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
# I = rgb2gray(img)
XCS = XCSLBP(img)
XCS = XCS * (255/ 16)
XCSframe = torch.tensor(XCS)
textureFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
XCSSpLocal = torch.mean(XCSframe[f].float())
textureFeatureList.append(XCSSpLocal)
textureFeatureList = torch.tensor(textureFeatureList)
textureFeatureList = textureFeatureList.unsqueeze(1)
return textureFeatureList
def regionEdgeFeatures(img, labels):
'''
input_param:
img: CV2.imread
labels
'''
numlab = max(labels)
rlabels = labels.view(config.imgSize)
# frame = rgb2gray(img)
Gx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
Gy = cv2.Sobel(img, cv2.CV_64F, 0, 1)
Gmag = np.sqrt(Gx**2.0 + Gy**2.0)
Gdir = np.arctan2(Gy, Gx) * (180 / np.pi)
Gx, Gy, Gmag, Gdir = torch.tensor(Gx), torch.tensor(Gy), torch.tensor(Gmag), torch.tensor(Gdir)
edgeFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
GxSpLocal = torch.mean(Gx[f].float())
GySpLocal = torch.mean(Gy[f].float())
GmagSpLocal = torch.mean(Gmag[f].float())
GdirSpLocal = torch.mean(Gdir[f].float())
edgeFeature = [GxSpLocal, GySpLocal, GmagSpLocal, GdirSpLocal]
edgeFeatureList.append(edgeFeature)
edgeFeatureList = torch.tensor(edgeFeatureList)
return edgeFeatureList
def regionSpatialFeatures(labels):
numlab = max(labels)
rlabels = labels.view(config.imgSize)
col, row = config.imgSize
x = range(1, col + 1)
y = range(1, row + 1)
Sx, Sy = np.meshgrid(y, x)
Sx, Sy = torch.tensor(Sx), torch.tensor(Sy)
spatialFeatureList = []
for i in range(numlab + 1):
f = torch.eq(rlabels, i)
SxSpLocal = torch.mean(Sx[f].float())
SySpLocal = torch.mean(Sy[f].float())
spatialFeature = [SxSpLocal, SySpLocal]
spatialFeatureList.append(spatialFeature)
spatialFeatureList = torch.tensor(spatialFeatureList)
return spatialFeatureList | 2.96875 | 3 |
python/DeepSeaSceneLighting/Convert/LightSetConvert.py | akb825/DeepSea | 5 | 12794546 | # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flatbuffers
import math
from .. import DirectionalLight
from .. import Light
from ..LightUnion import LightUnion
from .. import PointLight
from .. import SceneLightSet
from .. import SpotLight
from DeepSeaScene.Color3f import CreateColor3f
from DeepSeaScene.Vector3f import CreateVector3f
class Object:
pass
def convertLightSet(convertContext, data):
"""
Converts a light set for a scene. The data map is expected to contain the following elements:
- lights: array of lights to initially populate the light set with. Each member of the array
has the following members:
- name: the name of the light.
- color: the color of the light as an array of three float values, typically in the range
[0, 1].
- intensity: the intensity of the light, which multiplies the color.
- type: the type of the light. The following types are supported with the members they expect:
- "Directional"
- direction: direction of the light as an array of three float values.
- "Point"
- position: position of the light as an array of three float values.
- linearFalloff: amount the light falls off based on distance.
- quadraticFalloff: amount the light falls off based on squared distance. Defaults to 1.
- "Spot"
- position: position of the light as an array of three float values.
- direction: direction of the light as an array of three float values.
- linearFalloff: amount the light falls off based on distance. Defaults to 1.
- quadraticFalloff: amount the light falls off based on squared distance. Defaults to 1.
- innerSpotAngle: the angle in degrees of the spot light where it starts to fade out.
- outerSpotAngle: the angle in degrees of the spot light where it finishes fade out.
- maxLights: the maximum number of lights that can be stored. If unset, the number of elements
in lights will be used.
- ambientColor: the color of the ambient light as an array of three floats, typically in the
range [0,1]. Defaults to all 0.
- ambientIntensity: the intensity of the ambient light, which multiplies the color. Defaults
to 0.
- mainLight: the name of the main light. If omitted no light will be considered the main light.
- srgb: true to treat all color values as sRGB values to be converted to linear space. Defaults
to false.
"""
def readFloat(value, name, minVal = None, maxVal = None):
try:
floatVal = float(value)
if (minVal is not None and floatVal < minVal) or \
(maxVal is not None and floatVal > maxVal):
raise Exception() # Common error handling in except block.
return floatVal
except:
raise Exception('Invalid ' + name + ' value "' + str(value) + '".')
def readInt(value, name, minVal):
try:
intVal = int(value)
if intVal < minVal:
raise Exception() # Common error handling in except block.
return intVal
except:
raise Exception('Invalid ' + name + ' value "' + str(value) + '".')
def readColor(value, name, srgb):
if not isinstance(value, list) or len(value) != 3:
raise Exception('SceneLight ' + name + ' must be an array of three floats.')
color = [readFloat(value[0], name + ' red channel'),
readFloat(value[1], name + ' green channel'),
readFloat(value[2], name + ' blue channel')]
if srgb:
for i in range(0, 3):
if color[i] <= 0.04045:
color[i] = color[i]/12.92
else:
color[i] = pow((color[i] + 0.055)/1.055, 2.4)
return color
def readVector(value, name):
if not isinstance(value, list) or len(value) != 3:
raise Exception('SceneLight ' + name + ' must be an array of three floats.')
return [readFloat(value[0], name + ' x'),
readFloat(value[1], name + ' y'),
readFloat(value[2], name + ' z')]
try:
srgb = data.get('srgb', False)
lightsData = data.get('lights', [])
lights = []
try:
for lightData in lightsData:
try:
light = Object()
light.name = str(lightData['name'])
light.color = readColor(lightData['color'], 'light color', srgb)
light.intensity = readFloat(lightData['intensity'], 'light intensity', 0.0)
lightType = lightData['type']
if lightType == 'Directional':
light.type = LightUnion.DirectionalLight
light.direction = readVector(lightData['direction'], 'light direction')
elif lightType == 'Point':
light.type = LightUnion.PointLight
light.position = readVector(lightData['position'], 'light position')
light.linearFalloff = readFloat(lightData.get('linearFalloff', 1.0),
'light linear falloff', 0.0)
light.quadraticFalloff = readFloat(lightData.get('quadraticFalloff', 1.0),
'light quadratic falloff', 0.0)
elif lightType == 'Spot':
light.type = LightUnion.SpotLight
light.position = readVector(lightData['position'], 'light position')
light.direction = readVector(lightData['direction'], 'light direction')
light.linearFalloff = readFloat(lightData.get('linearFalloff', 1.0),
'light linear falloff', 0.0)
light.quadraticFalloff = readFloat(lightData.get('quadraticFalloff', 1.0),
'light quadratic falloff', 0.0)
light.innerSpotAngle = math.radians(readFloat(lightData['innerSpotAngle'],
'inner spot angle', 0.0, 180.0))
light.outerSpotAngle = math.radians(readFloat(lightData['outerSpotAngle'],
'outer spot angle', 0.0, 180.0))
if light.innerSpotAngle > light.outerSpotAngle:
raise Exception(
'Spot light inner spot angle must be less than outer spot angle.')
except KeyError as e:
raise Exception('LightSet light doesn\'t contain element ' + str(e) + '.')
lights.append(light)
except (TypeError, ValueError):
raise Exception('SceneLights "lights" must be an array of objects.')
maxLights = readInt(data.get('maxLights', 0), 'maxLights', 0)
if not maxLights and not lights:
raise Exception('SceneLights cannot have zero max lights.')
ambientColorData = data.get('ambientColor')
if ambientColorData:
ambientColor = readColor(ambientColorData, 'ambient color', srgb)
else:
ambientColor = None
ambientIntensity = readFloat(data.get('ambientIntensity', 0.0), 'ambient intensity', 0.0)
mainLight = str(data.get('mainLight', ''))
except KeyError as e:
raise Exception('LightSet doesn\'t contain element ' + str(e) + '.')
except (AttributeError, TypeError, ValueError):
raise Exception('LightSet must be an object.')
builder = flatbuffers.Builder(0)
lightOffsets = []
for light in lights:
nameOffset = builder.CreateString(light.name)
if light.type == LightUnion.DirectionalLight:
DirectionalLight.Start(builder)
DirectionalLight.AddDirection(builder, CreateVector3f(builder, light.direction[0],
light.direction[1], light.direction[2]))
DirectionalLight.AddColor(builder, CreateColor3f(builder, light.color[0],
light.color[1], light.color[2]))
DirectionalLight.AddIntensity(builder, light.intensity)
lightUnionOffset = DirectionalLight.End(builder)
elif light.type == LightUnion.PointLight:
PointLight.Start(builder)
PointLight.AddPosition(builder, CreateVector3f(builder, light.position[0],
light.position[1], light.position[2]))
PointLight.AddColor(builder, CreateColor3f(builder, light.color[0], light.color[1],
light.color[2]))
PointLight.AddIntensity(builder, light.intensity)
PointLight.AddLinearFalloff(builder, light.linearFalloff)
PointLight.AddQuadraticFalloff(builder, light.quadraticFalloff)
lightUnionOffset = PointLight.End(builder)
elif light.type == LightUnion.SpotLight:
SpotLight.Start(builder)
SpotLight.AddPosition(builder, CreateVector3f(builder, light.position[0],
light.position[1], light.position[2]))
SpotLight.AddDirection(builder, CreateVector3f(builder, light.direction[0],
light.direction[1], light.direction[2]))
SpotLight.AddColor(builder, CreateColor3f(builder, light.color[0], light.color[1],
light.color[2]))
SpotLight.AddIntensity(builder, light.intensity)
SpotLight.AddLinearFalloff(builder, light.linearFalloff)
SpotLight.AddQuadraticFalloff(builder, light.quadraticFalloff)
SpotLight.AddInnerSpotAngle(builder, light.innerSpotAngle)
SpotLight.AddOuterSpotAngle(builder, light.outerSpotAngle)
lightUnionOffset = SpotLight.End(builder)
Light.Start(builder)
Light.AddName(builder, nameOffset)
Light.AddLightType(builder, light.type)
Light.AddLight(builder, lightUnionOffset)
lightOffsets.append(Light.End(builder))
if lightOffsets:
SceneLightSet.StartLightsVector(builder, len(lightOffsets))
for offset in reversed(lightOffsets):
builder.PrependUOffsetTRelative(offset)
lightsOffset = builder.EndVector()
else:
		lightsOffset = 0
mainLightOffset = 0
if mainLight:
mainLightOffset = builder.CreateString(mainLight)
SceneLightSet.Start(builder)
SceneLightSet.AddLights(builder, lightsOffset)
SceneLightSet.AddMaxLights(builder, maxLights)
SceneLightSet.AddAmbientColor(builder,
CreateColor3f(builder, ambientColor[0], ambientColor[1], ambientColor[2]) if ambientColor
else 0)
SceneLightSet.AddAmbientIntensity(builder, ambientIntensity)
SceneLightSet.AddMainLight(builder, mainLightOffset)
builder.Finish(SceneLightSet.End(builder))
return builder.Output()
| 2.75 | 3 |
cookbook/data_structures/keep_last_items.py | brittainhard/py | 0 | 12794547 | """
A deque is a double-ended queue: it lets you efficiently append and prepend items.
Appending or popping at either end of a deque is O(1) time, whereas inserting or
popping at the front of a list is O(N).
"""
from collections import deque
def search(lines, pattern, history=5):
previous_lines = deque(maxlen=history)
for line in lines:
if pattern in line:
yield line, previous_lines
previous_lines.append(line)
with open("potato.txt") as f:
for line, prevlines in search(f, "python", 3):
for pline in prevlines:
print(pline, end="")
print(line, end="")
print("-" * 20)
"""
You don't have to delete items from a deque. They are automatically deleted if
you add more items than the maxlength allows.
You can just use deques as lists too.
"""
p = deque(maxlen=3)
p.append(1)
p.append(2)
p.append(3)
p.append(4)
print(p)
p.appendleft(5)
print(p)
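# Removing from either end is O(1) as well (continuation of the example above).
print(p.popleft())  # -> 5, removes and returns the leftmost item
print(p.pop())      # -> 3, removes and returns the rightmost item
print(p)            # -> deque([2], maxlen=3)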
| 3.984375 | 4 |
Dalitz_simplified/optimisation/miranda/miranda_eval.py | weissercn/MLTools | 0 | 12794548 | <filename>Dalitz_simplified/optimisation/miranda/miranda_eval.py<gh_stars>0
"""
This script can be used to get the p value for the Miranda method (=chi squared). It takes input files with column vectors corresponding to
features and lables.
"""
print(__doc__)
import sys
sys.path.insert(0,'../..')
import os
from scipy import stats
import p_value_scoring_object
import numpy as np
import matplotlib.pyplot as plt
import numpy.matlib
from matplotlib.colors import Normalize
import matplotlib.ticker as mtick
import matplotlib
from sklearn.preprocessing import StandardScaler
##############################################################################
# Setting parameters
#
name="gaussian_same_projection_on_each_axis_0_1vs0_05_optimisation_miranda"
sample1_name="particle"
sample2_name="antiparticle"
shuffling_seed = 100
single_no_bins_list=[2,3,4,5,6,7,8,9,10,11,12,15,17,20,22,25,27,30,35]
comp_file_list=[]
#Dalitz
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data_optimisation.0.0.txt",os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data_optimisation.200.1.txt")]
#Gaussian
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high10Dgauss_optimisation_10000_0.5_0.1_0.0_1.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high10Dgauss_optimisation_10000_0.5_0.1_0.01_1.txt")]
# gaussian_same_projection_on_each_axis
comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_2D_1000_0.6_0.2_0.1_1.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_2D_1000_0.6_0.2_0.05_1.txt")]
#Legendre
#legendre one contribution, 100th vs 99th legendre polynomial
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1__100__sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1__100__sample_1.txt")]
#legendreSquared one contribution, 10th vs 9th legendre polynomial
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_squared_contrib0__1__10__sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_squared_contrib0__1__9__sample_0.txt")]
#4 contributions 1D
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__contrib1__0_5__1_0__contrib2__2_0__2_0__contrib3__0_7__3_0__sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__contrib1__0_0__1_0__contrib2__2_0__2_0__contrib3__0_7__3_0__sample_0.txt")]
#4 contributions 3D
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__1_0__2_0__contrib1__0_5__1_0__2_0__3_0__contrib2__2_0__2_0__3_0__0_0__contrib3__0_7__3_0__0_0__1_0__sample_0.txt", os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__1_0__2_0__contrib1__0_0__1_0__2_0__3_0__contrib2__2_0__2_0__3_0__0_0__contrib3__0_7__3_0__0_0__1_0__sample_0.txt")]
#4 contributions 4D
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__1_0__2_0__3_0__contrib1__0_5__1_0__2_0__3_0__0_0__contrib2__2_0__2_0__3_0__0_0__1_0__contrib3__0_7__3_0__0_0__1_0__2_0__sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1_0__0_0__1_0__2_0__3_0__contrib1__0_0__1_0__2_0__3_0__0_0__contrib2__2_0__2_0__3_0__0_0__1_0__contrib3__0_7__3_0__0_0__1_0__2_0__sample_0.txt")]
# Sine with 10 periods vs 9 periods
#comp_file_list=[(os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_sin_10_periods_1D_sample_0.txt",os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_sin_9_periods_1D_sample_0.txt")]
print(comp_file_list)
score_list=[]
##############################################################################
comp_file_0,comp_file_1 = comp_file_list[0]
for single_no_bins in single_no_bins_list:
print("single_no_bins : ",single_no_bins)
print("Operating of files :"+comp_file_0+" "+comp_file_1)
#extracts data from the files
features_0=np.loadtxt(comp_file_0,dtype='d')
features_1=np.loadtxt(comp_file_1,dtype='d')
#determine how many data points are in each sample
no_0=features_0.shape[0]
no_1=features_1.shape[0]
#Give all samples in file 0 the label 0 and in file 1 the feature 1
label_0=np.zeros((no_0,1))
label_1=np.ones((no_1,1))
#Create an array containing samples and features.
data_0=np.c_[features_0,label_0]
data_1=np.c_[features_1,label_1]
data=np.r_[data_0,data_1]
no_dim = data.shape[1]-1
no_bins = [single_no_bins]*no_dim
np.random.shuffle(data)
labels=data[:,-1]
X_values= data[:,:-1]
X_max = np.amax(data,axis=0)[:-1]
X_min = np.amin(data,axis=0)[:-1]
X_width = (np.divide(np.subtract(X_max,X_min),no_bins))
#print(X_width)
setup_command_0 = "bins_sample0=np.zeros(("
setup_command_1 = "bins_sample1=np.zeros(("
for dim in range(no_dim):
setup_command_0 += str(int(no_bins[dim]))+","
setup_command_1 += str(int(no_bins[dim]))+","
setup_command_0=setup_command_0[:-1]+"))"
setup_command_1=setup_command_1[:-1]+"))"
exec setup_command_0
exec setup_command_1
for i in range(no_0+no_1):
#bin position
#x_bin=int(np.floor((Xx_values[i]-Xx_min)/Xx_width))
#y_bin=int(np.floor((Xy_values[i]-Xy_min)/Xy_width))
pos_bins=np.floor(np.divide(np.subtract(X_values[i,:],X_min[:]),X_width[:]))
#print(pos_bins)
#eliminate boundary effects
for dim in range(no_dim):
if(pos_bins[dim]==no_bins[dim]):
pos_bins[dim] -=1
#if(pos_bins[0]==no_bins[0]):
#pos_bins[0] -=1
bin_command_0 = "bins_sample0["
bin_command_1 = "bins_sample1["
for dim in range(no_dim):
bin_command_0 += str(int(pos_bins[dim]))+","
bin_command_1 += str(int(pos_bins[dim]))+","
bin_command_0=bin_command_0[:-1]+"]"
bin_command_1=bin_command_1[:-1]+"]"
#print("labels[i]: {0}".format(str(int(labels[i]))))
#print(bin_command_0)
if(labels[i]==0):
#print(bin_command_0)
#bins_sample0[y_bin,x_bin] +=1
exec bin_command_0 + "+=1"
#eval(bin_command_0)
#print("labels[i]: {0}".format(str(int(labels[i]))))
else:
#bins_sample1[y_bin,x_bin] +=1
exec bin_command_1 + "+=1"
#print("labels[i]: {0}".format(str(int(labels[i]))))
if __debug__:
print("bins_sample0 : ",bins_sample0)
print("np.sum(bins_sample0) : ",np.sum(bins_sample0))
print("bins_sample1 : ",bins_sample1)
print("np.sum(bins_sample1) : ",np.sum(bins_sample1))
#element wise subtraction and division
Scp2 = np.divide(np.square(np.subtract(bins_sample1,bins_sample0)),np.add(bins_sample1,bins_sample0))
if __debug__:
print("Scp2 : ", Scp2)
#nansum ignores all the contributions that are Not A Number (NAN)
Chi2 = np.nansum(Scp2)
if __debug__:
print("Chi2 : ", Chi2)
dof=no_bins[0]
for dim in range(1,no_dim):
dof *= no_bins[1]
dof-=1
print(bins_sample0)
print(bins_sample1)
print("Chi2/dof : {0}".format(str(Chi2/dof)))
pvalue= 1 - stats.chi2.cdf(Chi2,dof)
print("pvalue : {0}".format(str(pvalue)))
print("dof : ",dof)
#import scipy.stats
#chi2_sp_cont, p_sp_cont, dof_sp_cont, expected_sp_cont = stats.chi2_contingency([bins_sample1.flatten(),bins_sample0.flatten()])
#print("(chi2_sp_cont, p_sp_cont, dof_sp_cont, expected_sp_cont) : ",(chi2_sp_cont, p_sp_cont, dof_sp_cont, expected_sp_cont))
score_list.append(pvalue)
import csv
with open(name+"_values", "wb") as test_statistics_file:
test_statistics_file.write("nbins \t pvalue \n")
writer = csv.writer(test_statistics_file, delimiter='\t', lineterminator='\n')
writer.writerows(zip(single_no_bins_list,score_list))
fig= plt.figure()
ax1= fig.add_subplot(1, 1, 1)
ax1.plot(single_no_bins_list,score_list,'o')
print("single_no_bins_list[0]-0.1",single_no_bins_list[0]-0.1)
print("single_no_bins_list[-1]+0.1",single_no_bins_list[-1]+0.1)
ax1.set_yscale('log')
plt.xlim([single_no_bins_list[0]-0.1,single_no_bins_list[-1]+0.1])
plt.ylim([min(score_list)*0.8,max(score_list)*1.2])
#Make 6.0-1.0=5 ticks between the min and max
no_ticks=5.0
ticks_list= np.power(min(score_list)/max(score_list),(np.arange(no_ticks+1.0))/no_ticks)*max(score_list)
ax1.set_yticks(ticks_list)
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
ax1.set_xlabel("number of bins per axis")
ax1.set_ylabel("pvalue")
ax1.set_title(name)
fig.savefig(name+"_plot.png")
| 2.703125 | 3 |
backend/util/response/search_products_results/__init__.py | willrp/willstores-ws | 4 | 12794549 | <reponame>willrp/willstores-ws<filename>backend/util/response/search_products_results/__init__.py
from .search_products_results_response import SearchProductsResultsResponse
from .search_products_results_schema import SearchProductsResultsSchema
| 1.21875 | 1 |
tests/pyspark/wranglers/test_interval_identifier.py | TobiasRasbold/pywrangler | 14 | 12794550 | """This module contains tests for pyspark interval identifier.
isort:skip_file
"""
import pandas as pd
import pytest
from pywrangler.util.testing import PlainFrame
pytestmark = pytest.mark.pyspark # noqa: E402
pyspark = pytest.importorskip("pyspark") # noqa: E402
from tests.test_data.interval_identifier import (
CollectionGeneral,
CollectionIdenticalStartEnd,
CollectionMarkerSpecifics,
CollectionNoOrderGroupBy,
MultipleIntervalsSpanningGroupbyExtendedTriple,
ResultTypeRawIids,
ResultTypeValidIids
)
from pywrangler.pyspark.wranglers.interval_identifier import (
VectorizedCumSum,
VectorizedCumSumAdjusted
)
WRANGLER = (VectorizedCumSum, VectorizedCumSumAdjusted)
WRANGLER_IDS = [x.__name__ for x in WRANGLER]
WRANGLER_KWARGS = dict(argnames='wrangler',
argvalues=WRANGLER,
ids=WRANGLER_IDS)
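# WRANGLER_KWARGS is simply splatted into pytest.mark.parametrize below, i.e. it
# is shorthand for:
#   @pytest.mark.parametrize(argnames='wrangler', argvalues=WRANGLER, ids=WRANGLER_IDS)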
@pytest.mark.parametrize(**WRANGLER_KWARGS)
@CollectionGeneral.pytest_parametrize_kwargs("marker_use")
@CollectionGeneral.pytest_parametrize_testcases
def test_base(testcase, wrangler, marker_use):
"""Tests against all available wranglers and test cases.
Parameters
----------
testcase: DataTestCase
Generates test data for given test case.
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
marker_use: dict
Defines the marker start/end use.
"""
# instantiate test case
testcase_instance = testcase("pyspark")
# instantiate wrangler
kwargs = testcase_instance.test_kwargs.copy()
kwargs.update(marker_use)
wrangler_instance = wrangler(**kwargs)
# pass wrangler to test case
testcase_instance.test(wrangler_instance.transform)
@pytest.mark.parametrize(**WRANGLER_KWARGS)
@CollectionIdenticalStartEnd.pytest_parametrize_testcases
def test_identical_start_end(testcase, wrangler):
"""Tests against all available wranglers and test cases.
Parameters
----------
testcase: DataTestCase
Generates test data for given test case.
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
Refers to the actual wrangler_instance begin tested. See `WRANGLER`.
"""
# instantiate test case
testcase_instance = testcase("pyspark")
# instantiate wrangler
wrangler_instance = wrangler(**testcase_instance.test_kwargs)
# pass wrangler to test case
testcase_instance.test(wrangler_instance.transform)
@pytest.mark.parametrize(**WRANGLER_KWARGS)
@CollectionMarkerSpecifics.pytest_parametrize_testcases
def test_marker_specifics(testcase, wrangler):
"""Tests specific `marker_start_use_first` and `marker_end_use_first`
scenarios.
Parameters
----------
testcase: DataTestCase
Generates test data for given test case.
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
"""
# instantiate test case
testcase_instance = testcase("pyspark")
# instantiate wrangler
wrangler_instance = wrangler(**testcase_instance.test_kwargs)
# pass wrangler to test case
testcase_instance.test(wrangler_instance.transform)
@pytest.mark.parametrize(**WRANGLER_KWARGS)
def test_repartition(wrangler):
"""Tests that repartition has no effect.
Parameters
----------
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
"""
# instantiate test case
testcase_instance = MultipleIntervalsSpanningGroupbyExtendedTriple()
# instantiate wrangler
wrangler_instance = wrangler(**testcase_instance.test_kwargs)
# pass wrangler to test case
testcase_instance.test.pyspark(wrangler_instance.transform, repartition=5)
@pytest.mark.parametrize(**WRANGLER_KWARGS)
def test_result_type_raw_iids(wrangler):
"""Test for correct raw iids constraints. Returned result only needs to
distinguish intervals regardless of their validity. Interval ids do not
need to be in specific order.
Parameters
----------
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
"""
testcase_instance = ResultTypeRawIids("pandas")
wrangler_instance = wrangler(result_type="raw",
**testcase_instance.test_kwargs)
df_input = testcase_instance.input.to_pyspark()
df_output = testcase_instance.output.to_pandas()
df_result = wrangler_instance.transform(df_input)
df_result = (PlainFrame.from_pyspark(df_result)
.to_pandas()
.sort_values(testcase_instance.orderby_columns)
.reset_index(drop=True))
col = testcase_instance.target_column_name
pd.testing.assert_series_equal(df_result[col].diff().ne(0),
df_output[col].diff().ne(0))
@CollectionGeneral.pytest_parametrize_kwargs("marker_use")
@pytest.mark.parametrize(**WRANGLER_KWARGS)
def test_result_type_valid_iids(wrangler, marker_use):
"""Test for correct valid iids constraints. Returned result needs to
distinguish valid from invalid intervals. Invalid intervals need to be 0.
Parameters
----------
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
marker_use: dict
Contains `marker_start_use_first` and `marker_end_use_first` parameters
as dict.
"""
testcase_instance = ResultTypeValidIids("pyspark")
kwargs = testcase_instance.test_kwargs.copy()
kwargs.update(marker_use)
wrangler_instance = wrangler(result_type="valid", **kwargs)
df_input = testcase_instance.input.to_pyspark()
df_output = testcase_instance.output.to_pandas()
df_result = wrangler_instance.transform(df_input)
df_result = (PlainFrame.from_pyspark(df_result)
.to_pandas()
.sort_values(testcase_instance.orderby_columns)
.reset_index(drop=True))
col = testcase_instance.target_column_name
pd.testing.assert_series_equal(df_result[col].diff().ne(0),
df_output[col].diff().ne(0))
pd.testing.assert_series_equal(df_result[col].eq(0),
df_output[col].eq(0))
@pytest.mark.parametrize(**WRANGLER_KWARGS)
@CollectionNoOrderGroupBy.pytest_parametrize_testcases
def test_no_order_groupby(testcase, wrangler):
"""Tests correct behaviour for missing groupby columns.
Parameters
----------
testcase: DataTestCase
Generates test data for given test case.
wrangler: pywrangler.wrangler_instance.interfaces.IntervalIdentifier
        Refers to the actual wrangler_instance being tested. See `WRANGLER`.
"""
# instantiate test case
testcase_instance = testcase("pyspark")
# instantiate wrangler
kwargs = testcase_instance.test_kwargs.copy()
kwargs.update({'groupby_columns': None})
wrangler_instance = wrangler(**kwargs)
# pass wrangler to test case
testcase_instance.test(wrangler_instance.transform)
| 2.828125 | 3 |