# =============================================================================
# File: rokso/rokso.py
# Repo: matrixbegins/rokso-migrations  (license: MIT)
# =============================================================================
import click, sys, os, pathlib
# sys.path.append(pathlib.Path(__file__).parent.absolute())
try:
from .lib import agent
except ImportError:
from lib import agent
@click.group()
def cli():
pass
@click.command('init', short_help='🚀 init your migration project. configures db connection parameters')
@click.option('--projectpath', prompt='Enter path to setup project',
              required=True, envvar='MIG_DB_PROJECT_PATH', help="The path where the project will be set up. rokso can create this directory if it does not exist.")
@click.option('--dbhost', prompt='Enter database hostname ',
required=True, envvar='MIG_DB_HOST',
help="Database host where rokso will connect to.")
@click.option('--dbname', prompt='Enter database name ',
required=True, envvar='MIG_DB_NAME',
help="Database name where rokso will apply migrations.")
@click.option('--dbusername', prompt='Enter database username ',
required=True, envvar='MIG_DB_USER', help="Database username for connecting database.")
@click.option('--dbpassword', prompt='Enter database password',
required=True, hide_input=True, envvar='MIG_DB_PASSWORD',
help="Database password for connecting database.")
def init(dbhost, dbname, dbusername, dbpassword, projectpath):
"""This commands configures basic environment variables that are needed to cary out database migrations.
Make sure the given user has ALTER, ALTER ROUTINE, CREATE, CREATE ROUTINE, DELETE, DROP, EXECUTE,
INDEX, INSERT, SELECT, SHOW DATABASES, UPDATE privileges.
"""
agent.init_setup(dbhost, dbname, dbusername, dbpassword, projectpath)
@click.command('status', short_help='✅ checks the current state of database and pending migrations')
def status():
""" checks the current state of database and pending migrations. It's good to run this before running migrate command. """
# click.echo('checking database status' + __file__)
agent.db_status()
@click.command('remap', short_help='🔄 Reverse engineer your DB migrations from existing database.')
def remap():
""" Reverse engineer your DB migrations from existing database.
Make sure init command is complete and you have a valid config file and migrations directory setup. """
click.echo('Starting remapping of existing database for versioning')
agent.reverse_engineer_db()
@click.command('create', short_help='➕ create a database migration.')
@click.option('--tablename', required=True, prompt='Enter table/procedure/function name that you want to create this migration for.',
help="The table/procedure/function name for which you want to create the migration.")
@click.option('--filename', required=True, prompt='Enter a file name for this migration.',
help="Name of the migration file.")
def create(tablename, filename):
""" Creates a migration template file for specified table/entity name. """
click.echo('creating a migration ...........')
agent.create_db_migration(tablename, filename)
@click.command('migrate', short_help='⤴️ Apply all outstanding migrations to database.')
@click.option('--migration', help="Specific migration that needs to be carried out.\nThis option must be of format <tableName>/<fileName> and your file must be under the same path inside migration directory")
def migrate(migration):
""" Apply all outstanding migrations to database.
    By specifying the --migration option you can apply just a single migration. """
# click.echo('Applying following migrations to database....' + migration)
agent.apply_migration(migration)
@click.command('rollback', short_help='⤵️ Rollback last applied migration')
@click.option('--version',
help="Rollbacks database state to specified version.\nThese version numbers can be obtained either from database or by running `rokso status`")
def rollback(version):
""" Rollback last applied out migration
By specifing --version option you can rollback to a previous DB state. """
agent.rollback_db_migration(version)
@click.command('last-success', short_help='⤵️ last successful migration version number')
def last_success():
agent.last_success()
cli.add_command(init)
cli.add_command(status)
cli.add_command(remap)
cli.add_command(create)
cli.add_command(migrate)
cli.add_command(rollback)
cli.add_command(last_success)
def main():
return cli()
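# --- Illustrative usage sketch (added for clarity, not part of the original
# file). Click commands can be exercised programmatically with click's test
# runner; invoking "--help" avoids touching a real database.
def _cli_usage_sketch():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ["--help"])
    print(result.exit_code)
    print(result.output)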
if __name__ == '__main__':
main()

# =============================================================================
# File: tests/python-reference/property/simple_property_decorator.py
# Repo: jpolitz/lambda-py-paper  (license: Apache-2.0)
# =============================================================================
class C(object):
def __init__(self):
self.x = 42
@property
def f(self):
self.x += 1
return self.x
@f.setter
def f(self, value):
self.x = value
@f.deleter
def f(self):
del self.x
c = C()
assert c.x == 42
assert c.f == 43
c.f = 55
assert c.x == 55
assert c.f == 56
del c.f
assert not hasattr(c, 'x')
assert not hasattr(c, 'f')
assert hasattr(C, 'f')

# =============================================================================
# File: crypto/xnuca-2020/imposter/task.py
# Repo: BadMonkey7/funny-ctf-challenge  (license: MIT)
# =============================================================================
#!/usr/bin/env python3
import os
import random
import string
from hashlib import sha256
from Toy_AE import Toy_AE
from secret import FLAG
def proof_of_work():
random.seed(os.urandom(8))
proof = b''.join([random.choice(string.ascii_letters + string.digits).encode() for _ in range(20)])
digest = sha256(proof).hexdigest().encode()
print("sha256(XXXX+%s) == %s" % (proof[4:],digest))
print("Give me XXXX:")
x = input().encode()
return False if len(x) != 4 or sha256(x + proof[4:]).hexdigest().encode() != digest else True
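# Illustrative solver sketch (added note, not part of the challenge): a client
# answering this proof of work would brute-force the missing 4-character
# prefix, roughly:
#   from itertools import product
#   for guess in product(string.ascii_letters + string.digits, repeat=4):
#       if sha256(''.join(guess).encode() + suffix).hexdigest().encode() == digest:
#           send(''.join(guess)); break
# where `suffix` and `digest` are parsed from the server banner (hypothetical
# client-side variables).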
def pack(uid, uname, token, cmd, appendix):
r = b''
r += b'Uid=%d\xff' % uid
r += b'UserName=%s\xff' % uname
r += b'T=%s\xff' % token
r += b'Cmd=%s\xff' % cmd
r += appendix
return r
def unpack(r):
data = r.split(b"\xff")
uid, uname, token, cmd, appendix = int(data[0][4:]), data[1][9:], data[2][2:], data[3][4:], data[4]
return (uid, uname, token, cmd, appendix)
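# Illustrative round-trip sketch (added note, not part of the original file):
# pack() joins the fields with 0xff separators and unpack() recovers them, e.g.
#   r = pack(1, b"guest", b"token123", b"ls", b"extra")
#   assert unpack(r) == (1, b"guest", b"token123", b"ls", b"extra")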
def apply_ticket():
uid = int(input("Set up your user id:")[:5])
uname = input("Your username:").encode("ascii")[:16]
if uname == b"Administrator":
print("Sorry, preserved username.")
return
token = sha256(uname).hexdigest()[:max(8, uid % 16)].encode("ascii")
cmd = input("Your command:").encode("ascii")[:16]
if cmd == b"Give_Me_Flag":
print("Not allowed!")
return
appendix = input("Any Appendix?").encode("ascii")[:16]
msg = pack(uid, uname, token, cmd, appendix)
ct, te = ae.encrypt(msg)
print("Your ticket:%s" % ct.hex())
print("With my Auth:%s" % te.hex())
def check_ticket():
ct = bytes.fromhex(input("Ticket:"))
te = bytes.fromhex(input("Auth:"))
msg = ae.decrypt(ct, te)
assert msg
uid, uname, token, cmd, appendix = unpack(msg)
if uname == b"Administrator" and cmd == b"Give_Me_Flag":
print(FLAG)
exit(0)
else:
print("Nothing happend.")
def menu():
print("Menu:")
print("[1] Apply Ticket")
print("[2] Check Ticket")
print("[3] Exit")
op = int(input("Your option:"))
assert op in range(1, 4)
if op == 1:
apply_ticket()
elif op == 2:
check_ticket()
else:
print("Bye!")
exit(0)
if __name__ == "__main__":
ae = Toy_AE()
if not proof_of_work():
exit(-1)
for _ in range(4):
try:
menu()
except:
            exit(-1)

# =============================================================================
# File: configuration.py
# Repo: SDRAST/support  (license: Apache-2.0)
# =============================================================================
"""
module configuration - Provides a single file for program parameters
Includes names of directories and files. Both functions are private, meant to
be used only by software configuration generators.
"""
import os
import datetime
#from support.pyro import async
# ----- general classes and functions for managing software configurations ----
def _check_and_create(path):
"""
Check for path and create if needed
This is a private function.
"""
if not os.path.exists(path):
os.makedirs(path)
def _make_properties(prop_names):
"""
Decorator function for adding public properties to classes
This creates the property, and its getter and setter methods
Dean comments: "_make_properties is a _very esoteric function, and I
remember wondering if I should remove it because it decreases readability
while saving only a few lines of code. In general, I sort of think class
decorators should be used sparingly, as they often obfuscate more than they
clarify, (I would argue that the Pyro4.expose decorator is an example of a
good class decorator.)"
@param prop_names : properties to be defined for the decorated class
@type prop_names : list of str
"""
def property_factory(prop_name):
"""
local function which returns a getter and setter
"""
def prop_get(self):
"""
this defines a method for getting the property's value
"""
prop_val = getattr(self, "_{}".format(prop_name))
return prop_val
def prop_set(self, new_val):
"""
this defines a method for setting the property's value
"""
setattr(self, "_{}".format(prop_name), new_val)
# return the methods for the named property
return prop_get, prop_set
def wrapper(cls):
"""
Enhances a class by setting the attributes (property_names) passed to
the decorator function
@param cls : class to be decorated
@type cls : class
"""
for prop_name in prop_names:
prop = property(*property_factory(prop_name))
setattr(cls, prop_name, prop)
return cls
return wrapper
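# Illustrative sketch (added for clarity; the class and property names below
# are hypothetical): decorating a class adds a getter/setter pair per name,
# each backed by an underscore-prefixed attribute.
#
#   @_make_properties(["site", "antenna"])
#   class ObservingConfig(object):
#       pass
#
#   cfg = ObservingConfig()
#   cfg.site = "DSS-43"   # generated setter stores the value on cfg._site
#   print(cfg.site)       # generated getter reads it back -> "DSS-43"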
class Configuration(object):
"""
class for creating software configurations
"""
_protected = {"_emitter", "_config"} # reserved attributes
def __init__(self, cls, *args, **kwargs):
"""
Initialize a configuration
        An example of ``cls`` would be ``TAMSConfiguration`` (in
        ``support.configuration``). ``args`` and ``kwargs`` are the arguments
        used by ``cls`` to make the actual configuration, which is a ``dict``.
The configuration may have an associated event emitter to which
callbacks (functions to invoke when something changes) can be
registered. If the keyword argument ``threaded`` is not provided then
there will be no event emitter.
@param cls : class from which a configuration object will be created
@type cls : class
@param args : additional positional arguments for initializing 'cls'
@type args : list
@param kwargs : keyword arguments for initializing 'cls'
@type kwargs : dict
"""
# create the event emitter
#self._emitter = async.EventEmitter(
# threaded=kwargs.pop("threaded", False)
#)
# create the configuration
self._config = cls(*args, **kwargs)
def __getattr__(self, attr):
if attr in Configuration._protected:
return object.__getattribute__(self, attr)
#elif attr in self._emitter.__class__.__dict__:
# return getattr(self._emitter, attr)
else:
# self._emitter.emit(attr)
return getattr(self._config, attr)
def __setattr__(self, attr, val):
if attr in Configuration._protected:
object.__setattr__(self, attr, val)
else:
setattr(self._config, attr, val)
self._emitter.emit(attr, val)
def make_today_dir(self, base_dir):
"""
Make the following directory structure:
base_dir/<year>/<doy>
Args:
base_dir (str):
Returns:
str: base_dir with year and doy subdirectories
"""
year, doy = datetime.datetime.utcnow().strftime("%Y,%j").split(",")
today_dir = os.path.join(base_dir, year, doy)
_check_and_create(today_dir)
return today_dir
| 32.58042 | 79 | 0.618158 |
4a23001717e15f004959047456813bc5d74f9513 | 3,361 | py | Python | actions/configure_vcs_vip.py | StackStorm-Exchange/vdx_vtep | aa010847faf83a6c57be551133c76c999ee885f8 | [
"Apache-2.0"
] | null | null | null | actions/configure_vcs_vip.py | StackStorm-Exchange/vdx_vtep | aa010847faf83a6c57be551133c76c999ee885f8 | [
"Apache-2.0"
] | 1 | 2017-05-03T16:09:28.000Z | 2017-07-19T12:53:40.000Z | actions/configure_vcs_vip.py | StackStorm-Exchange/vdx_vtep | aa010847faf83a6c57be551133c76c999ee885f8 | [
"Apache-2.0"
] | 2 | 2017-05-03T12:43:55.000Z | 2021-01-28T17:48:14.000Z | # Copyright 2017 Great Software Laboratory Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pynos.device
from st2common.runners.base_action import Action
from ipaddress import ip_interface
class ConfigureVcsVip(Action):
"""
Implements the logic to set Virtual IP in VCS Fabric.
This action acheives the below functionality:
1. Set VIP on the device
"""
def __init__(self, config=None):
super(ConfigureVcsVip, self).__init__(config=config)
def run(self, host=None, vip_mask=None, username=None, password=None):
"""Run helper methods to implement the desired state.
"""
if vip_mask is None:
vip_mask = self.config['vip_mask']
if username is None:
username = self.config['username']
if password is None:
password = self.config['password']
if host is None:
host = self.config['mgmt_ip1']
conn = (host, '22')
auth = (username, password)
changes = {}
with pynos.device.Device(conn=conn, auth=auth) as device:
changes['pre_requisites'] = self._check_requirements(device, vip_mask)
changes['configure_vcs'] = False
if changes['pre_requisites']:
changes['configure_vcs'] = self._set_vcs_vip(device, vip_mask)
else:
self.logger.info(
'Pre-requisites validation failed for Virtual IP configuration')
if not changes['configure_vcs']:
self.logger.info('Virtual IP configuration in Vcs Fabric Failed')
exit(1)
else:
self.logger.info(
'closing connection to %s after configuring Virtual IP successfully!',
host)
return changes
def _check_requirements(self, device, vip):
""" Verify if the Virtual IP already exists
"""
ipaddress = ip_interface(unicode(vip))
vips = device.vcs.vcs_vip(get=True)
if ipaddress.version == 4:
ipv4_config = vips['ipv4_vip']
conf = ipv4_config.data.find('.//{*}address')
if ipaddress.version == 6:
ipv6_config = vips['ipv6_vip']
conf = ipv6_config.data.find('.//{*}ipv6address')
if conf is not None:
self.logger.info("VCS virtual IPv %s address is already configured" % ipaddress.version)
return False
return True
def _set_vcs_vip(self, device, vip):
"""
Set VCS Virtual IP on the device.
"""
try:
device.vcs.vcs_vip(vip=vip)
return True
except RuntimeError as e:
self.logger.error(
'Configuring VCS VIP: %s Failed with Exception: %s' % (e, vip))
return False
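
# Illustrative invocation sketch (added note; the connection values are
# hypothetical): when a StackStorm runner supplies `config`, the action boils
# down to
#   ConfigureVcsVip(config).run(host="10.24.4.215", vip_mask="10.24.4.225/24",
#                               username="admin", password="password")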

# =============================================================================
# File: plaid_integration/plaid_integration/doctype/plaid_settings/plaid_settings.py
# Repo: drivedgevd/plaid_integration  (license: MIT)
# =============================================================================
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Drivedgevd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import cstr
from frappe.model.document import Document
from frappe.utils import now_datetime
from frappe.utils.background_jobs import enqueue
from plaid_integration.plaid_integration.plaid_controller import PlaidController
class PlaidSettings(Document):
def generate_access_token(self, public_token):
plaid = PlaidController()
access_token = plaid.get_access_token(public_token)
return access_token
def sync_transactions(self, bank, access_token):
try:
self.db_set("sync_status", "In Progress")
frappe.msgprint(_("Queued for Syncing Plaid Transactions. It may take a few minutes."))
method = "plaid_integration.plaid_integration.doctype.plaid_settings.plaid_settings.sync_plaid_data"
enqueue(method, now=True)
except Exception as e:
print frappe.get_traceback()
self.db_set("sync_status", "Failed")
frappe.msgprint("Syncing is failed. Please check Sync log")
def get_transactions(self, access_token):
plaid = PlaidController(access_token)
transactions = plaid.get_trasaction()
return transactions
def make_sync_data_entries(self, synced_data, bank):
accounts = self.sync_accounts(synced_data.get('accounts'), bank)
transactions = self.sync_plaid_transactions(synced_data.get('transactions'), bank)
return {"accounts": accounts, "transactions": transactions}
def sync_accounts(self, accounts, bank):
try:
acc_ids = []
for acc in accounts:
if not frappe.db.exists("Plaid Account", acc.get('account_id')):
acc_ = frappe.new_doc("Plaid Account")
acc_.account_id = acc.get('account_id')
acc_.official_name = acc.get('official_name')
acc_.insert()
acc_ids.append(acc_.name)
return acc_ids
except Exception as e:
raise e
def sync_plaid_transactions(self, transactions, bank):
try:
transaction_ids = []
for idx, row in enumerate(transactions):
pt = self.make_plaid_transaction(row, bank)
if pt: transaction_ids.append(pt)
return transaction_ids
except Exception as e:
raise e
def make_plaid_transaction(self, transaction, bank):
try:
if not frappe.db.get_value("Plaid Transaction",
{"transaction_id": transaction.get('transaction_id')},"name"):
            fields = ['account_owner', 'category_id', 'account_id', 'pending_transaction_id',
                      'transaction_name', 'date', 'transaction_id', 'transaction_type', 'amount', 'pending']
pt = frappe.new_doc("Plaid Transaction")
doc_dict = {}
for key, val in transaction.iteritems():
if key in fields:
doc_dict[key] = val
elif key == "category":
doc_dict[key] = ("-").join(val) if val else ""
elif key == "location":
doc_dict['location'] = (",\n").join([ key+"-"+str(val) for key, val in val.iteritems()])
elif key == "payment_meta" and val:
payment_meta = []
for k, v in val.iteritems():
payment_meta.append({
"key": k,
"value": v
})
doc_dict[key] = payment_meta
elif key == "name":
doc_dict["transaction_name"] = val
else: pass
doc_dict["bank"] = bank
doc_dict['account_type'] = frappe.db.get_value("Plaid Account", {
"account_id": doc_dict.get('account_id')}, "official_name")
pt.update(doc_dict)
pt.insert()
return pt.name
except Exception as e:
raise e
def make_sync_log(self, bank, sync_details, status):
try:
sync_log = frappe.new_doc("Plaid Sync Log")
sync_log.date = frappe.utils.now()
sync_log.sync_status = status
sync_log.bank = bank
sync_log.sync_log = cstr(sync_details)
sync_log.save()
frappe.db.commit()
except Exception as e:
print frappe.get_traceback()
raise e
@frappe.whitelist()
def sync_plaid_data():
plaid_settings = frappe.get_doc("Plaid Settings", "Plaid Settings")
for bank in plaid_settings.banks:
try:
if bank.get('access_token'):
sync_data = plaid_settings.get_transactions(bank.get('access_token'))
sync_details = plaid_settings.make_sync_data_entries(sync_data, bank.get('bank_name'))
plaid_settings.make_sync_log(bank.get('bank_name'), sync_details, "Successful")
except Exception as e:
err_msg = frappe.as_unicode(e)
plaid_settings.make_sync_log(bank.get('bank_name'), err_msg, "Failed")
plaid_settings.sync_status = "Successful"
plaid_settings.last_sync = now_datetime()
plaid_settings.save()
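
# Illustrative note (added for clarity; the dotted path and schedule follow the
# usual Frappe conventions and are assumptions): being whitelisted,
# sync_plaid_data can also be wired into hooks.py, e.g.
#   scheduler_events = {"daily": ["plaid_integration.plaid_integration.doctype"
#                                 ".plaid_settings.plaid_settings.sync_plaid_data"]}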

# =============================================================================
# File: motey/val/plugins/xenVAL.py
# Repo: Neoklosch/fog_node_prototype  (license: Apache-2.0)
# =============================================================================
import motey.val.plugins.abstractVAL as abstractVAL
class XenVAL(abstractVAL.AbstractVAL):
def __init__(self):
super().__init__()
def has_image(self, image_name):
raise NotImplementedError("Should have implemented this")
def load_image(self, image_name):
raise NotImplementedError("Should have implemented this")
def delete_image(self, image_name):
raise NotImplementedError("Should have implemented this")
def create_instance(self, image_name, parameters={}):
raise NotImplementedError("Should have implemented this")
def start_instance(self, container_name, parameters={}):
raise NotImplementedError("Should have implemented this")
def stop_instance(self, container_name):
raise NotImplementedError("Should have implemented this")
def has_instance(self, instance_name):
raise NotImplementedError("Should have implemented this")
def get_stats(self, container_name):
raise NotImplementedError("Should have implemented this")
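
# Illustrative subclass sketch (added note; the method body is hypothetical):
# a concrete VAL plugin would override the hooks above, e.g.
#   class DummyVAL(XenVAL):
#       def has_image(self, image_name):
#           return image_name in ("alpine", "busybox")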

# =============================================================================
# File: tests/test_boolean.py
# Repo: masteralves/python-fastjsonschema  (license: BSD-3-Clause)
# =============================================================================
import pytest
from fastjsonschema import JsonSchemaException
exc = JsonSchemaException('data must be boolean')
@pytest.mark.parametrize('value, expected', [
(0, exc),
(None, exc),
(True, True),
(False, False),
('abc', exc),
([], exc),
({}, exc),
])
def test_boolean(asserter, value, expected):
asserter({'type': 'boolean'}, value, expected)

# =============================================================================
# File: tests/test_event.py
# Repo: johnnoone/aioconsul  (license: BSD-3-Clause)
# =============================================================================
import pytest
from collections.abc import Mapping, Sequence
@pytest.mark.asyncio
async def test_endpoint(client):
assert repr(client.event) == "<EventEndpoint(%r)>" % str(client.address)
@pytest.mark.asyncio
async def test_event(client):
event = await client.event.fire("foobar")
assert isinstance(event, Mapping)
assert "Name" in event
assert "NodeFilter" in event
assert "TagFilter" in event
assert "ID" in event
assert "Payload" in event
assert "ServiceFilter" in event
assert "Version" in event
assert "LTime" in event
items, meta = await client.event.items("foobar")
assert isinstance(items, Sequence)
assert "Index" in meta
assert "KnownLeader" not in meta
assert "LastIndex" not in meta
@pytest.mark.asyncio
async def test_payload(client):
event = await client.event.fire("bazqux", payload=b"foobar")
assert event["Name"] == "bazqux"
assert event["Payload"] == b"foobar"
items, _ = await client.event.items("bazqux")
for elt in items:
if elt["ID"] == event["ID"]:
break
else:
pytest.fail("Unable to find event")
assert elt["ID"] == event["ID"]
assert elt["Name"] == event["Name"]
assert elt["Payload"] == event["Payload"]

# =============================================================================
# File: language/mentionmemory/tasks/mention_memory_task.py
# Repo: greck2908/language  (license: Apache-2.0)
# =============================================================================
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains mention memory model implementation."""
from typing import Any, Callable, Dict, Optional, Tuple
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import mention_memory_encoder
from language.mentionmemory.modules import mention_losses
from language.mentionmemory.modules import mlm_layer
from language.mentionmemory.tasks import mention_encoder_task
from language.mentionmemory.tasks import task_registry
from language.mentionmemory.utils import default_values
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils import metric_utils
from language.mentionmemory.utils.custom_types import Array, MetricGroups # pylint: disable=g-multiple-import
import language.mentionmemory.utils.mention_preprocess_utils as mention_preprocess_utils
import ml_collections
import tensorflow.compat.v2 as tf
class MentionMemoryModel(nn.Module):
"""Mention Memory pre-training model.
Attributes:
encoder_config: Mention Memory encoder hyperparameters.
"""
encoder_config: ml_collections.FrozenConfigDict
def setup(self):
self.encoder = mention_memory_encoder.MentionMemoryEncoder(
**self.encoder_config)
self.mlm_layer = mlm_layer.MLMLayer(
vocab_size=self.encoder.vocab_size,
hidden_size=self.encoder.hidden_size,
dtype=self.encoder.dtype,
layer_norm_epsilon=self.encoder.layer_norm_epsilon,
embedding_init=self.encoder.kernel_init,
bias_init=self.encoder.bias_init,
)
def __call__(
self, batch: Dict[str, Array],
deterministic: bool) -> Tuple[Dict[str, Array], Dict[str, Array]]:
encoded_input, loss_helpers, logging_helpers = self.encoder.forward(
batch, deterministic)
loss_helpers['mlm_logits'] = self.mlm_layer(
encoded_input=encoded_input,
mlm_target_positions=batch['mlm_target_positions'],
shared_embedding=loss_helpers['word_embeddings'])
return loss_helpers, logging_helpers
@task_registry.register_task('mention_memory')
class MentionMemoryTask(mention_encoder_task.MentionEncoderTask):
"""Pre-training task for mention memory encoder."""
model_class = MentionMemoryModel
encoder_name = 'mention_memory'
@classmethod
def make_loss_fn(
cls, config: ml_collections.ConfigDict
) -> Callable[..., Tuple[float, MetricGroups, Dict[str, Any]]]:
"""Creates task loss function.
See BaseTask.
The Mention Memory encoder is pre-trained with a combination of 1) MLM loss,
2) same-entity retrieval loss encouraging retrieval of mentions of the same
entity as the passage mention, 3) entity coreference loss encouraging
mentions of the same entity to have similar representations, and 4) Matching
the Blanks-style loss encouraging mentions of the same entity which co-occur
with mentions of the same second entity to have similar representations.
Args:
config: contains experiment hyperparameters.
Returns:
Loss function.
"""
mlm_weight = config.mlm_weight
el_im_weight = config.el_im_weight
el_second_im_weight = config.get('el_second_im_weight', 0)
coref_res_weight = config.get('coref_res_weight', 0)
coref_res_mode = config.get('coref_res_mode', 'dot')
mtb_im_weight = config.get('mtb_im_weight', 0)
mtb_final_weight = config.get('mtb_final_weight', 0)
mtb_score_mode = config.get('mtb_score_mode', 'dot')
same_passage_weight = config.get('same_passage_weight', 0)
same_entity_set_retrieval_weight = config.get(
'same_entity_set_retrieval_weight', 0)
el_final_weight = config.get('el_final_weight', 0)
def loss_fn(
model_config: ml_collections.FrozenConfigDict,
model_params: Dict[str, Any],
model_vars: Dict[str, Any],
batch: Dict[str, Any],
deterministic: bool,
dropout_rng: Optional[Dict[str, Array]] = None,
) -> Tuple[float, MetricGroups, Dict[str, Any]]:
"""Model-specific loss function. See BaseTask."""
batch_size = batch['text_ids'].shape[0]
mention_target_ids = batch['mention_target_ids']
mention_target_ids *= batch['mention_target_weights']
variable_dict = {'params': model_params}
variable_dict.update(model_vars)
loss_helpers, logging_helpers = cls.build_model(model_config).apply(
variable_dict, batch, deterministic=deterministic, rngs=dropout_rng)
mlm_logits = loss_helpers['mlm_logits']
mlm_target_is_mention = batch['mlm_target_is_mention']
mlm_target_is_not_mention = 1 - batch['mlm_target_is_mention']
mention_target_is_masked = batch['mention_target_is_masked']
mention_target_is_not_masked = 1 - mention_target_is_masked
mlm_loss, mlm_denom = metric_utils.compute_weighted_cross_entropy(
mlm_logits, batch['mlm_target_ids'], batch['mlm_target_weights'])
correct_mask = jnp.equal(
jnp.argmax(mlm_logits, axis=-1),
batch['mlm_target_ids']) * batch['mlm_target_weights']
mlm_acc = correct_mask.sum()
mlm_mention_acc = (correct_mask * mlm_target_is_mention).sum()
mlm_mention_denom = (batch['mlm_target_weights'] *
mlm_target_is_mention).sum()
mlm_non_mention_acc = (correct_mask * mlm_target_is_not_mention).sum()
mlm_non_mention_denom = (batch['mlm_target_weights'] *
mlm_target_is_not_mention).sum()
loss = mlm_weight * mlm_loss / mlm_denom
metrics = {
'mlm': {
'loss': mlm_loss,
'acc': mlm_acc,
'denominator': mlm_denom,
},
'mlm_mention': {
'acc': mlm_mention_acc,
'denominator': mlm_mention_denom,
},
'mlm_non_mention': {
'acc': mlm_non_mention_acc,
'denominator': mlm_non_mention_denom,
},
}
def process_el_im_loss(loss, weight, prefix=''):
memory_attention_weights = loss_helpers[prefix +
'memory_attention_weights']
memory_entity_ids = loss_helpers[prefix + 'top_entity_ids']
target_mentions_memory_attention_weights = jut.matmul_slice(
memory_attention_weights, batch['mention_target_indices'])
intermediate_entity_ids = jut.matmul_slice(
memory_entity_ids, batch['mention_target_indices'])
el_loss_intermediate, same_entity_avg_prob, el_im_denom = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
target_mentions_memory_attention_weights, intermediate_entity_ids,
mention_target_ids, batch['mention_target_weights'])
if weight > 0:
loss += weight * el_loss_intermediate / el_im_denom
metrics[prefix + 'el_intermediate'] = {
'loss': el_loss_intermediate,
'same_entity_avg_prob': same_entity_avg_prob,
'denominator': el_im_denom,
}
return loss
loss = process_el_im_loss(loss, el_im_weight)
if 'second_memory_attention_weights' in loss_helpers:
loss = process_el_im_loss(loss, el_second_im_weight, 'second_')
if coref_res_weight > 0:
(coref_res_loss,
coref_res_metrics) = mention_losses.coreference_resolution_loss(
loss_helpers['target_mention_encodings'],
batch['mention_target_batch_positions'], mention_target_ids,
batch_size, coref_res_mode, mention_target_is_masked)
coref_res_denom = coref_res_metrics['coref_resolution']['denominator']
loss += coref_res_weight * coref_res_loss / coref_res_denom
metrics.update(coref_res_metrics)
if mtb_im_weight > 0:
(mtb_im_loss, mtb_im_metrics) = mention_losses.mtb_loss(
loss_helpers['intermediate_target_mention_encodings'],
batch['mention_target_batch_positions'], mention_target_ids,
batch_size, mtb_score_mode, mention_target_is_masked, 'im_')
mtb_im_denom = mtb_im_metrics['im_mtb']['denominator']
loss += mtb_im_weight * mtb_im_loss / mtb_im_denom
metrics.update(mtb_im_metrics)
if mtb_final_weight > 0:
(mtb_final_loss, mtb_final_metrics) = mention_losses.mtb_loss(
loss_helpers['target_mention_encodings'],
batch['mention_target_batch_positions'], mention_target_ids,
batch_size, mtb_score_mode, mention_target_is_masked, 'final_')
mtb_final_denom = mtb_final_metrics['final_mtb']['denominator']
loss += mtb_final_weight * mtb_final_loss / mtb_final_denom
metrics.update(mtb_final_metrics)
if same_passage_weight > 0:
same_passage_mask = loss_helpers['memory_attention_disallowed_mask']
(same_passage_loss, same_passage_metrics, _
) = metric_utils.compute_cross_entropy_loss_with_positives_and_negatives_masks(
loss_helpers['memory_attention_scores_with_disallowed'],
same_passage_mask, jnp.logical_not(same_passage_mask),
batch['mention_mask'])
same_passage_denom = same_passage_metrics['denominator']
loss += same_passage_weight * same_passage_loss / same_passage_denom
metrics['same_passage'] = same_passage_metrics
if same_entity_set_retrieval_weight > 0:
if config.get('same_entity_set_target_threshold') is None:
raise ValueError(
'`same_entitites_retrieval_threshold` must be specified '
'if `same_entity_set_retrieval_weight` is provided')
(same_entity_set_retrieval_loss, same_entity_set_retrieval_avg_prob,
same_entity_set_retrieval_denom
) = mention_losses.same_entity_set_retrieval_loss(
mention_target_batch_positions=batch[
'mention_target_batch_positions'],
mention_target_ids=mention_target_ids,
mention_target_weights=batch['mention_target_weights'],
mention_batch_positions=batch['mention_batch_positions'],
mention_mask=batch['mention_mask'],
memory_text_entities=loss_helpers['memory_top_text_entities'],
memory_attention_weights=loss_helpers['memory_attention_weights'],
memory_mask=1 - loss_helpers['memory_attention_disallowed_mask'],
batch_size=batch_size,
same_entity_set_target_threshold=config
.same_entity_set_target_threshold)
loss += (
same_entity_set_retrieval_weight * same_entity_set_retrieval_loss /
same_entity_set_retrieval_denom)
metrics['same_entity_set_retrieval'] = {
'loss': same_entity_set_retrieval_loss,
'avg_prob': same_entity_set_retrieval_avg_prob,
'denominator': same_entity_set_retrieval_denom,
}
if el_final_weight > 0:
final_attention_weights = loss_helpers['final_memory_attention_weights']
final_memory_entity_ids = loss_helpers['final_top_entity_ids']
(el_loss_final, same_entity_avg_prob_final, el_loss_denom
) = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
final_attention_weights, final_memory_entity_ids,
mention_target_ids, batch['mention_target_weights'])
(_, same_entity_avg_prob_final_masked, el_loss_denom_masked
) = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
final_attention_weights, final_memory_entity_ids,
mention_target_ids,
batch['mention_target_weights'] * mention_target_is_masked)
(_, same_entity_avg_prob_final_not_masked, el_loss_denom_not_masked
) = metric_utils.compute_loss_and_prob_from_probs_with_duplicates(
final_attention_weights, final_memory_entity_ids,
mention_target_ids,
batch['mention_target_weights'] * mention_target_is_not_masked)
metrics['el_final'] = {
'loss': el_loss_final,
'same_entity_avg_prob': same_entity_avg_prob_final,
'denominator': el_loss_denom,
}
metrics['el_final_masked'] = {
'same_entity_avg_prob': same_entity_avg_prob_final_masked,
'denominator': el_loss_denom_masked,
}
metrics['el_final_not_masked'] = {
'same_entity_avg_prob': same_entity_avg_prob_final_not_masked,
'denominator': el_loss_denom_not_masked,
}
loss += el_final_weight * el_loss_final / (
el_loss_denom + default_values.SMALL_NUMBER)
metrics['agg'] = {
'loss': loss,
'denominator': 1.0,
}
if 'n_disallowed' in logging_helpers:
metrics['disallowed'] = {
'per_mention': logging_helpers['n_disallowed'],
'denominator': batch['mention_mask'].sum(),
}
if 'second_n_disallowed' in logging_helpers:
metrics['second_n_disallowed'] = {
'per_mention': logging_helpers['second_n_disallowed'],
'denominator': batch['mention_mask'].sum(),
}
auxiliary_output = {
'top_entity_ids': loss_helpers['top_entity_ids'],
'top_memory_ids': loss_helpers['top_memory_ids'],
}
if 'second_top_entity_ids' in loss_helpers:
auxiliary_output['second_top_entity_ids'] = loss_helpers[
'second_top_entity_ids']
auxiliary_output['second_top_memory_ids'] = loss_helpers[
'second_top_memory_ids']
return loss, metrics, auxiliary_output
return loss_fn
@staticmethod
def make_preprocess_fn(
config: ml_collections.ConfigDict
) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:
"""Produces function to preprocess samples.
See BaseTask.
Here we add a text identifier hash to the standard MentionEncoderTask
preprocessing pipeline.
Args:
config: ConfigDict. Contains experiment hyperparameters.
Returns:
Function that preprocesses samples to be usable for the model
(mod casting from tf to jnp dtype).
"""
max_length = config.model_config.encoder_config.max_length
mention_preprocessing_fn = mention_encoder_task.MentionEncoderTask.make_preprocess_fn(config) # pylint: disable=line-too-long
def preprocess_fn(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
"""Performs preprocessing for individual sample."""
new_example = mention_preprocessing_fn(example)
# Compute hash of text for text identifiers
new_example['text_identifiers'] = mention_preprocess_utils.text_hash_tf(
example['text_ids'], max_length)
return new_example
return preprocess_fn
@staticmethod
def make_collater_fn(
config: ml_collections.ConfigDict
) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:
"""Produces function to preprocess batches.
See BaseTask.
Batches text identifiers after standard mention preprocessing.
Args:
config: contains experiment hyperparameters.
Returns:
Function that preprocesses batches to be usable for the model
(mod casting from tf to jnp dtype).
"""
mention_collater_fn = mention_encoder_task.MentionEncoderTask.make_collater_fn(config) # pylint: disable=line-too-long
def collater_fn(batch: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
new_batch = mention_collater_fn(batch)
new_batch['text_identifiers'] = tf.gather(
new_batch['text_identifiers'], new_batch['mention_batch_positions'])
return new_batch
return collater_fn
@staticmethod
def dummy_input(config: ml_collections.ConfigDict) -> Dict[str, Any]:
"""Produces model-specific dummy input batch. See BaseTask."""
dummy_input = mention_encoder_task.MentionEncoderTask.dummy_input(config)
mention_position_shape = (config.max_mentions *
config.per_device_batch_size,)
int_type = jnp.int32
dummy_input['text_identifiers'] = jnp.ones(mention_position_shape, int_type)
return dummy_input

# =============================================================================
# File: news-analysis.py
# Repo: harrypottercookies/Binance-News-Sentiment-Bot  (license: MIT)
# =============================================================================
# import for environment variables and waiting
import os, time
# used to parse XML feeds
import xml.etree.ElementTree as ET
# we use it to make async http requests
import aiohttp
# allows us to make our functions async
import asyncio
# date modules that we'll most likely need
from datetime import date, datetime, timedelta
# used to grab the XML url list from a CSV file
import csv
# used to save and load coins_in_hand dictionary
import json
# numpy for sums and means
import numpy as np
# nlp library to analyse sentiment
import nltk
import pytz
from nltk.sentiment import SentimentIntensityAnalyzer
# needed for the binance API
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException, BinanceOrderException
# used for binance websocket
from binance.websockets import BinanceSocketManager
from twisted.internet import reactor
# used for executing the code
from itertools import count
# we use it to time our parser execution speed
from timeit import default_timer as timer
# Use testnet (change to True) or live (change to False)?
testnet = True
# get binance key and secret from environment variables for testnet and live
api_key_test = os.getenv('binance_api_stalkbot_testnet')
api_secret_test = os.getenv('binance_secret_stalkbot_testnet')
api_key_live = os.getenv('binance_api_stalkbot_live')
api_secret_live = os.getenv('binance_secret_stalkbot_live')
# Authenticate with the client
if testnet:
client = Client(api_key_test, api_secret_test)
else:
client = Client(api_key_live, api_secret_live)
# The API URL is manually changed in the library to work on the testnet
if testnet:
client.API_URL = 'https://testnet.binance.vision/api'
############################################
# USER INPUT VARIABLES LIVE BELOW #
# You may edit those to configure your bot #
############################################
# select what coins to look for as keywords in articles headlines
# The key of each dict MUST be the symbol used for that coin on Binance
# Use each list to define keywords separated by commas: 'XRP': ['ripple', 'xrp']
# keywords are case sensitive
keywords = {
'XRP': ['ripple', 'xrp', 'XRP', 'Ripple', 'RIPPLE'],
'BTC': ['BTC', 'bitcoin', 'Bitcoin', 'BITCOIN'],
'XLM': ['Stellar Lumens', 'XLM'],
#'BCH': ['Bitcoin Cash', 'BCH'],
'ETH': ['ETH', 'Ethereum'],
'BNB' : ['BNB', 'Binance Coin'],
'LTC': ['LTC', 'Litecoin']
}
# The Buy amount in the PAIRING symbol, by default USDT
# 100 will for example buy the equivalent of 100 USDT in Bitcoin.
QUANTITY = 100
# define what to pair each coin to
# AVOID PAIRING WITH ONE OF THE COINS USED IN KEYWORDS
PAIRING = 'USDT'
# define how positive the news should be in order to place a trade
# the number is a compound of neg, neu and pos values from the nltk analysis
# input a number between -1 and 1
SENTIMENT_THRESHOLD = 0
NEGATIVE_SENTIMENT_THRESHOLD = 0
# define the minimum number of articles that need to be analysed in order
# for the sentiment analysis to qualify for a trade signal
# avoid using 1 as that's not representative of the overall sentiment
MINUMUM_ARTICLES = 1
# define how often to run the code (check for new + try to place trades)
# in minutes
REPEAT_EVERY = 60
# define how old an article can be to be included
# in hours
HOURS_PAST = 24
############################################
# END OF USER INPUT VARIABLES #
# Edit with care #
############################################
# coins that bought by the bot since its start
coins_in_hand = {}
# path to the saved coins_in_hand file
coins_in_hand_file_path = 'coins_in_hand.json'
# use separate files for testnet and live
if testnet:
coins_in_hand_file_path = 'testnet_' + coins_in_hand_file_path
# if saved coins_in_hand json file exists then load it
if os.path.isfile(coins_in_hand_file_path):
with open(coins_in_hand_file_path) as file:
coins_in_hand = json.load(file)
# and add coins from actual keywords if they aren't in coins_in_hand dictionary already
for coin in keywords:
if coin not in coins_in_hand:
coins_in_hand[coin] = 0
# current price of CRYPTO pulled through the websocket
CURRENT_PRICE = {}
def ticker_socket(msg):
'''Open a stream for financial information for CRYPTO'''
if msg['e'] != 'error':
global CURRENT_PRICE
CURRENT_PRICE['{0}'.format(msg['s'])] = msg['c']
else:
print('error')
# connect to the websocket client and start the socket
bsm = BinanceSocketManager(client)
for coin in keywords:
conn_key = bsm.start_symbol_ticker_socket(coin+PAIRING, ticker_socket)
bsm.start()
'''For the amount of CRYPTO to trade in USDT'''
lot_size = {}
'''Find step size for each coin
For example, BTC supports a volume accuracy of
0.000001, while XRP only 0.1
'''
for coin in keywords:
try:
info = client.get_symbol_info(coin+PAIRING)
step_size = info['filters'][2]['stepSize']
lot_size[coin+PAIRING] = step_size.index('1') - 1
if lot_size[coin+PAIRING]<0:
lot_size[coin+PAIRING]=0
except:
pass
for coin in keywords:
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin]<0:
lot_size[coin]=0
except:
pass
def calculate_one_volume_from_lot_size(coin, amount):
if coin not in lot_size:
return float('{:.1f}'.format(amount))
else:
return float('{:.{}f}'.format(amount, lot_size[coin]))
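# Added clarifying example (the stepSize value is illustrative): if Binance
# reports a stepSize of "0.00000100" for BTCUSDT, lot_size["BTCUSDT"] becomes 6
# and calculate_one_volume_from_lot_size("BTCUSDT", 0.12345678) returns
# 0.123457 (str.format rounds rather than truncates).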
def calculate_volume():
while CURRENT_PRICE == {}:
print('Connecting to the socket...')
time.sleep(3)
else:
volume = {}
for coin in CURRENT_PRICE:
volume[coin] = float(QUANTITY / float(CURRENT_PRICE[coin]))
volume[coin] = calculate_one_volume_from_lot_size(coin, volume[coin])
return volume
# load the csv file containing the top 100 crypto feeds
# want to scan other websites?
# Simply add the RSS Feed url to the Crypto feeds.csv file
with open('Crypto feeds.csv') as csv_file:
# open the file
csv_reader = csv.reader(csv_file)
# remove any headers
next(csv_reader, None)
# create empty list
feeds = []
# add each row cotaining RSS url to feeds list
for row in csv_reader:
feeds.append(row[0])
# Make headlines global variable as it should be the same across all functions
headlines = {'source': [], 'title': [], 'pubDate' : [] }
async def get_feed_data(session, feed, headers):
'''
Get relevent data from rss feed, in async fashion
:param feed: The name of the feed we want to fetch
:param headers: The header we want on the request
:param timeout: The default timout before we give up and move on
:return: None, we don't need to return anything we append it all on the headlines dict
'''
try:
async with session.get(feed, headers=headers, timeout=60) as response:
# define the root for our parsing
text = await response.text()
root = ET.fromstring(text)
channel = root.find('channel/item/title').text
pubDate = root.find('channel/item/pubDate').text
# some jank to ensure no alien characters are being passed
title = channel.encode('UTF-8').decode('UTF-8')
            # convert pubDate to datetime
published = datetime.strptime(pubDate.replace("GMT", "+0000"), '%a, %d %b %Y %H:%M:%S %z')
# calculate timedelta
time_between = datetime.now(pytz.utc) - published
#print(f'Czas: {time_between.total_seconds() / (60 * 60)}')
if time_between.total_seconds() / (60 * 60) <= HOURS_PAST:
# append the source
headlines['source'].append(feed)
# append the publication date
headlines['pubDate'].append(pubDate)
# append the title
headlines['title'].append(title)
print(channel)
except Exception as e:
# Catch any error and also print it
print(f'Could not parse {feed} error is: {e}')
async def get_headlines():
'''
Creates a an async task for each of our feeds which are appended to headlines
:return: None
'''
# add headers to the request for ElementTree. Parsing issues occur without headers
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:87.0) Gecko/20100101 Firefox/87.0'
}
# A nifty timer to see how long it takes to parse all the feeds
start = timer()
async with aiohttp.ClientSession() as session:
tasks = []
for feed in feeds:
task = asyncio.ensure_future(get_feed_data(session, feed, headers))
tasks.append(task)
# This makes sure we finish all tasks/requests before we continue executing our code
await asyncio.gather(*tasks)
end = timer()
print("Time it took to parse feeds: ", end - start)
def categorise_headlines():
'''arrange all headlines scaped in a dictionary matching the coin's name'''
# get the headlines
asyncio.run(get_headlines())
categorised_headlines = {}
# this loop will create a dictionary for each keyword defined
for keyword in keywords:
categorised_headlines['{0}'.format(keyword)] = []
# keyword needs to be a loop in order to be able to append headline to the correct dictionary
for keyword in keywords:
# looping through each headline is required as well
for headline in headlines['title']:
# appends the headline containing the keyword to the correct dictionary
if any(key in headline for key in keywords[keyword]):
categorised_headlines[keyword].append(headline)
return categorised_headlines
def analyse_headlines():
'''Analyse categorised headlines and return NLP scores'''
sia = SentimentIntensityAnalyzer()
categorised_headlines = categorise_headlines()
sentiment = {}
for coin in categorised_headlines:
if len(categorised_headlines[coin]) > 0:
# create dict for each coin
sentiment['{0}'.format(coin)] = []
# append sentiment to dict
for title in categorised_headlines[coin]:
sentiment[coin].append(sia.polarity_scores(title))
return sentiment
def compile_sentiment():
'''Arranges every compound value into a list for each coin'''
sentiment = analyse_headlines()
compiled_sentiment = {}
for coin in sentiment:
compiled_sentiment[coin] = []
for item in sentiment[coin]:
# append each compound value to each coin's dict
compiled_sentiment[coin].append(sentiment[coin][sentiment[coin].index(item)]['compound'])
return compiled_sentiment
def compound_average():
'''Calculates and returns the average compoud sentiment for each coin'''
compiled_sentiment = compile_sentiment()
headlines_analysed = {}
for coin in compiled_sentiment:
headlines_analysed[coin] = len(compiled_sentiment[coin])
# calculate the average using numpy if there is more than 1 element in list
compiled_sentiment[coin] = np.array(compiled_sentiment[coin])
# get the mean
compiled_sentiment[coin] = np.mean(compiled_sentiment[coin])
# convert to scalar
compiled_sentiment[coin] = compiled_sentiment[coin].item()
return compiled_sentiment, headlines_analysed
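# Added clarifying example (numbers are hypothetical): compound_average() might
# return ({'BTC': 0.42, 'ETH': -0.10}, {'BTC': 3, 'ETH': 1}), i.e. three BTC
# headlines averaging +0.42 and a single ETH headline scoring -0.10.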
def buy(compiled_sentiment, headlines_analysed):
'''Check if the sentiment is positive and keyword is found for each handle'''
volume = calculate_volume()
for coin in compiled_sentiment:
# check if the sentiment and number of articles are over the given threshold
if compiled_sentiment[coin] > SENTIMENT_THRESHOLD and headlines_analysed[coin] >= MINUMUM_ARTICLES and coins_in_hand[coin]==0:
# check the volume looks correct
print(f'preparing to buy {volume[coin+PAIRING]} {coin} with {PAIRING} at {CURRENT_PRICE[coin+PAIRING]}')
if (testnet):
# create test order before pushing an actual order
test_order = client.create_test_order(symbol=coin+PAIRING, side='BUY', type='MARKET', quantity=volume[coin+PAIRING])
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol=coin+PAIRING,
side='BUY',
type='MARKET',
quantity=volume[coin+PAIRING]
)
#error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return some info
else:
# adds coin to our portfolio
coins_in_hand[coin] += volume[coin+PAIRING]
# retrieve the last order
order = client.get_all_orders(symbol=coin+PAIRING, limit=1)
if order:
                    # convert the order timestamp into UTC format
time = order[0]['time'] / 1000
utc_time = datetime.fromtimestamp(time)
# grab the price of CRYPTO the order was placed at for reporting
bought_at = CURRENT_PRICE[coin+PAIRING]
                    # print the order confirmation to the console
print(f"order {order[0]['orderId']} has been placed on {coin} with {order[0]['origQty']} at {utc_time} and bought at {bought_at}")
else:
print('Could not get last order from Binance!')
else:
print(f'Sentiment not positive enough for {coin}, or not enough headlines analysed or already bought: {compiled_sentiment[coin]}, {headlines_analysed[coin]}')
def sell(compiled_sentiment, headlines_analysed):
'''Check if the sentiment is negative and keyword is found for each handle'''
for coin in compiled_sentiment:
# check if the sentiment and number of articles are over the given threshold
if compiled_sentiment[coin] < NEGATIVE_SENTIMENT_THRESHOLD and headlines_analysed[coin] >= MINUMUM_ARTICLES and coins_in_hand[coin]>0:
# check the volume looks correct
print(f'preparing to sell {coins_in_hand[coin]} {coin} at {CURRENT_PRICE[coin+PAIRING]}')
amount_to_sell = calculate_one_volume_from_lot_size(coin+PAIRING, coins_in_hand[coin]*99.5/100)
if (testnet):
# create test order before pushing an actual order
test_order = client.create_test_order(symbol=coin+PAIRING, side='SELL', type='MARKET', quantity=amount_to_sell)
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol=coin+PAIRING,
side='SELL',
type='MARKET',
quantity=amount_to_sell
)
#error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return some info
else:
# set coin to 0
coins_in_hand[coin]=0
# retrieve the last order
order = client.get_all_orders(symbol=coin+PAIRING, limit=1)
if order:
                    # convert the order timestamp into UTC format
time = order[0]['time'] / 1000
utc_time = datetime.fromtimestamp(time)
# grab the price of CRYPTO the order was placed at for reporting
sold_at = CURRENT_PRICE[coin+PAIRING]
                    # print the order confirmation to the console
print(f"order {order[0]['orderId']} has been placed on {coin} with {order[0]['origQty']} coins sold for {sold_at} each at {utc_time}")
else:
print('Could not get last order from Binance!')
else:
print(f'Sentiment not negative enough for {coin}, not enough headlines analysed or not enough {coin} to sell: {compiled_sentiment[coin]}, {headlines_analysed[coin]}')
def save_coins_in_hand_to_file():
# abort saving if dictionary is empty
if not coins_in_hand:
return
# save coins_in_hand to file
with open(coins_in_hand_file_path, 'w') as file:
json.dump(coins_in_hand, file, indent=4)
if __name__ == '__main__':
print('Press Ctrl-Q to stop the script')
for i in count():
compiled_sentiment, headlines_analysed = compound_average()
print("\nBUY CHECKS:")
buy(compiled_sentiment, headlines_analysed)
print("\nSELL CHECKS:")
sell(compiled_sentiment, headlines_analysed)
print('\nCurrent bot holdings: ')
for coin in coins_in_hand:
if coins_in_hand[coin] > 0:
print(f'{coin}: {coins_in_hand[coin]}')
save_coins_in_hand_to_file()
print(f'\nIteration {i}')
time.sleep(60 * REPEAT_EVERY)

# =============================================================================
# File: examples/hacker_news/setup.py
# Repo: devarajnadiger/dagster  (license: Apache-2.0)
# =============================================================================
from setuptools import find_packages, setup
setup(
name="hacker_news",
version="dev",
author="Elementl",
author_email="[email protected]",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["test"]),
package_data={"hacker_news": ["hacker_news_dbt/*"]},
install_requires=[
"dagster",
"dagster-aws",
"dagster-dbt",
"dagster-pandas",
"dagster-pyspark",
"dagster-slack",
"dagster-postgres",
"dbt==0.19.0",
"mock",
"pandas",
"pyarrow>=4.0.0",
"pyspark",
"requests",
"gcsfs",
"fsspec",
"s3fs",
"scipy",
"sklearn",
"snowflake-sqlalchemy",
],
extras_require={"tests": ["mypy", "pylint", "pytest"]},
)
| 25.179487 | 59 | 0.529532 |
4a23053ee79a5b0ca4778c00df082e10ca546359 | 3,063 | py | Python | src/wired_components/request/utils.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | 1 | 2019-09-15T12:30:44.000Z | 2019-09-15T12:30:44.000Z | src/wired_components/request/utils.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | null | null | null | src/wired_components/request/utils.py | pauleveritt/wired_components | a9072d5fc48680d5ff895887842ffd0f06bc0081 | [
"MIT"
] | null | null | null | """
Mimic the various Pyramid path helpers.
"""
from typing import Union, Optional, List
from wired_components.resource import Root, Resource
SEP = "/"
# Taken from Sphinx
def relative_uri(base: str, to: str):
"""Return a relative URL from ``base`` to ``to``."""
# if to.startswith(SEP):
# return to
b2 = base.split(SEP)
t2 = to.split(SEP)
# remove common segments (except the last segment)
for x, y in zip(b2[:-1], t2[:-1]):
if x != y:
break
b2.pop(0)
t2.pop(0)
if b2 == t2:
# Special case: relative_uri('f/index.html','f/index.html')
# returns '', not 'index.html'
return ''
if len(b2) == 1 and t2 == ['']:
# Special case: relative_uri('f/index.html','f/') should
# return './', not ''
return '.' + SEP
prefix = ('..' + SEP) * (len(b2) - 1)
main_path = SEP.join(t2)
result = prefix + main_path
return result
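# Example (editor's illustration): relative_uri("a/b/index.html", "a/c/style.css")
# drops the shared "a/" prefix and returns "../c/style.css".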
def normalize_path(path: str) -> str:
""" All paths should end with a slash """
if not path.endswith('/'):
path += '/'
return path
def find_resource(root: Root, path: str) -> Resource:
if path == '/' or path == '/.':
return root
path = normalize_path(path)
items = iter(path[1:-1].split('/'))
resource = root
while True:
try:
current = next(items)
resource = resource[current]
except StopIteration:
return resource
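# Example (editor's illustration, hypothetical resource names): find_resource(root, "/docs/intro/")
# walks one child segment at a time and returns root["docs"]["intro"].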
def parents(resource: Resource) -> List[Optional[Resource]]:
these_parents: List[Optional[Resource]] = []
parent = resource.parent
while parent is not None:
these_parents.append(parent)
parent = parent.parent
return list(reversed(these_parents))
def resource_path(resource: Resource) -> str:
""" Give a slash-separated representation of resource w/ trailing / """
# Bail out quickly if we are the root or in the root
if resource.parent is None:
return '/'
elif resource.parent.parent is None:
return '/' + resource.name + '/'
# The root is '' so skip it
resource_parents = parents(resource)
# Get the names for each parent, then join with slashes
resource_parent_names = [p.name for p in resource_parents if p]
path = '/'.join(resource_parent_names) + '/' + resource.name + '/'
return path
def relative_path(
root: Root, current: Resource, target: Union[Resource, str],
) -> str:
""" Given current resource, generate relative path to target """
# First, if the target is a string path, get the resource
if isinstance(target, str):
target = find_resource(root, normalize_path(target))
result = relative_uri(
base=resource_path(current),
to=resource_path(target)
)
return result
def relative_static_path(current: Resource, target: str):
# Bail out quickly if we are the root or in the root
current_path = resource_path(current)
target_path = target
result = relative_uri(current_path, target_path)
return result
| 27.594595 | 75 | 0.613777 |
4a23057d86a74b89121841778f8c5df325dd8380 | 2,344 | py | Python | predict.py | GT-AcerZhang/yolov3.insects_challenge | 1ac6ee5a8a5c534ec11723542f4c10583935a2ad | [
"MIT"
] | 1 | 2021-02-27T17:59:32.000Z | 2021-02-27T17:59:32.000Z | predict.py | GT-AcerZhang/yolov3.insects_challenge | 1ac6ee5a8a5c534ec11723542f4c10583935a2ad | [
"MIT"
] | 1 | 2020-06-12T07:29:27.000Z | 2020-06-12T07:29:27.000Z | predict.py | GT-AcerZhang/yolov3.insects_challenge | 1ac6ee5a8a5c534ec11723542f4c10583935a2ad | [
"MIT"
] | 1 | 2021-03-05T11:08:50.000Z | 2021-03-05T11:08:50.000Z | # -*- coding: utf-8 -*-
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from reader import single_image_data_loader
from multinms import multiclass_nms
from yolov3 import YOLOv3
from draw_results import draw_results
import argparse
def parse_args():
parser = argparse.ArgumentParser("Evaluation Parameters")
parser.add_argument(
'--image_name',
type=str,
default='./insects/test/images/2599.jpeg',
help='the directory of test images')
parser.add_argument(
'--weight_file',
type=str,
default='./yolo_epoch50',
help='the path of model parameters')
args = parser.parse_args()
return args
args = parse_args()
IMAGE_NAME = args.image_name
WEIGHT_FILE = args.weight_file
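# Post-processing constants: ANCHORS lists the nine YOLOv3 anchor-box sizes (width/height pairs
# in pixels) and ANCHOR_MASKS groups them by detection scale; boxes scoring below VALID_THRESH
# are discarded, and non-maximum suppression keeps at most NMS_TOPK candidates before and
# NMS_POSK boxes after suppression, using an IoU threshold of NMS_THRESH.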
ANCHORS = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
ANCHOR_MASKS = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
VALID_THRESH = 0.01
NMS_TOPK = 400
NMS_POSK = 100
NMS_THRESH = 0.45
NUM_CLASSES = 7
if __name__ == '__main__':
with fluid.dygraph.guard():
model = YOLOv3('yolov3', num_classes=NUM_CLASSES, is_train=False)
model_state_dict, _ = fluid.load_dygraph(WEIGHT_FILE)
model.load_dict(model_state_dict)
model.eval()
total_results = []
test_loader = single_image_data_loader(IMAGE_NAME, mode='test')
for i, data in enumerate(test_loader()):
img_name, img_data, img_scale_data = data
img = to_variable(img_data)
img_scale = to_variable(img_scale_data)
outputs = model.forward(img)
bboxes, scores = model.get_pred(outputs,
im_shape=img_scale,
anchors=ANCHORS,
anchor_masks=ANCHOR_MASKS,
valid_thresh = VALID_THRESH)
bboxes_data = bboxes.numpy()
scores_data = scores.numpy()
results = multiclass_nms(bboxes_data, scores_data,
score_thresh=VALID_THRESH,
nms_thresh=NMS_THRESH,
pre_nms_topk=NMS_TOPK,
pos_nms_topk=NMS_POSK)
result = results[0]
draw_results(result, IMAGE_NAME, draw_thresh=0.5)
| 29.670886 | 88 | 0.605375 |
4a230ac34dba750db1e6266fd2c08488cfc9e9b8 | 450 | py | Python | bio_embeddings/__init__.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 219 | 2020-01-19T16:39:09.000Z | 2022-03-21T16:02:04.000Z | bio_embeddings/__init__.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 175 | 2019-12-05T13:27:14.000Z | 2022-03-30T16:58:32.000Z | bio_embeddings/__init__.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 33 | 2019-12-16T09:59:44.000Z | 2022-03-05T06:35:16.000Z | """
The functionality of bio_embeddings is split into 5 different modules
.. autosummary::
bio_embeddings.embed
bio_embeddings.extract
bio_embeddings.project
bio_embeddings.utilities
bio_embeddings.visualize
"""
import bio_embeddings.embed
import bio_embeddings.extract
import bio_embeddings.project
import bio_embeddings.utilities
import bio_embeddings.visualize
__all__ = ["embed", "extract", "project", "utilities", "visualize"]
| 23.684211 | 69 | 0.797778 |
4a230b54319b43590b53a4007e6dcaeed7548014 | 4,667 | py | Python | scripts/compute_attribution_maps.py | dgrinwald93/pytorch_bnns | 09bf7504f30373229579f15e847f1a7f87cf6ef0 | [
"Apache-2.0"
] | null | null | null | scripts/compute_attribution_maps.py | dgrinwald93/pytorch_bnns | 09bf7504f30373229579f15e847f1a7f87cf6ef0 | [
"Apache-2.0"
] | null | null | null | scripts/compute_attribution_maps.py | dgrinwald93/pytorch_bnns | 09bf7504f30373229579f15e847f1a7f87cf6ef0 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import json
import copy
import time
import matplotlib.pyplot as plt
from scipy.stats import entropy
import numpy as np
import torch
from torch import cuda, device
from curvature.sampling import invert_factors
from pbnn.datasets.get_datasets import *
from pbnn.models.utils import sample_curve_network, compute_attribution_map
import lucent
from lucent.util import set_seed
# Fix lucent seed
set_seed(42)
# SETUP #
file_path = './confs/compute_attribution_maps_inceptionv1.json'
with open(file_path, 'r') as j:
xp_conf = json.loads(j.read())
if not os.path.exists(xp_conf['attr_maps_save_path']):
os.makedirs(xp_conf['attr_maps_save_path'])
np.random.seed(xp_conf['np_seed'])
samples = xp_conf['num_nets']
seeds = np.random.randint(0, 10000000, size=(samples, 2))
# GET GPU #
device = device("cuda:0" if cuda.is_available() else "cpu")
print(f'Hardware accelerator: {device}')
# Get data
trainloader, valloader, testloader, testset, idx_to_class, class_to_name = \
globals()['load_'+xp_conf['ds']](
batch_size=xp_conf['batch_size'])
# Load pretrained model and factors
model_path = xp_conf['model_path']
factors_path = xp_conf['factors_path']
model = torch.load(model_path)
factors = torch.load(factors_path)
posterior_mean = copy.deepcopy(model.state_dict())
estimator = 'kfac'
inv_factors = invert_factors(factors, norm=1e3, scale=1e5, estimator='kfac')
# Load max entropy points
pred_dists = np.load(xp_conf['points_path'])
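# Entropy of each test point's 200-dimensional predictive distribution: higher entropy
# marks the images the model is least certain about.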
entropies = entropy(pred_dists.reshape(-1,200), axis=1)
print(entropies.shape)
# Rank test points by predictive entropy (most uncertain first / least uncertain first)
max_entropy_ids = np.argsort(entropies)[::-1]
min_entropy_ids = np.argsort(entropies)
# Highest entropy images
# total = 10
# fig, ax = plt.subplots(1, total, figsize=(30,15))
# for i in range(total):
# img = np.transpose(testset[max_entropy_ids[i]][0], (1,2,0))
# preprocessed_img = (img - img.min()) / (img.max() - img.min())
# ax[i].imshow(preprocessed_img)
# ax[i].set_title(f'ID: {max_entropy_ids[i]}, label: {class_to_name[idx_to_class[testloader.dataset[max_entropy_ids[i]][1]]]}')
# plt.savefig('./experiments/curve_inceptionv1/high_entropy_points/high_entropy_points.png')
# # Lowest entropy images
# total = 10
# fig, ax = plt.subplots(1, total, figsize=(30,15))
# for i in range(total):
# img = np.transpose(testset[min_entropy_ids[i]][0], (1,2,0))
# preprocessed_img = (img - img.min()) / (img.max() - img.min())
# ax[i].imshow(preprocessed_img)
# ax[i].set_title(f'ID: {min_entropy_ids[i]}, label: {class_to_name[idx_to_class[testloader.dataset[min_entropy_ids[i]][1]]]}')
# plt.savefig('./experiments/curve_inceptionv1/high_entropy_points/low_entropy_points.png')
# Main loop: for each of the ten highest-entropy test images, draw several weight samples
# from the approximate posterior (via the inverted KFAC factors) and compute an
# attribution map with every sampled network
for data_id in max_entropy_ids[:10]:
if not os.path.exists(xp_conf['attr_maps_save_path']+f'{data_id}/'):
os.makedirs(xp_conf['attr_maps_save_path']+f'{data_id}/')
img = testset[data_id][0].reshape(1, 3, 224, 224)
preprocessed_img = (img - img.min()) / (img.max() - img.min())
plt.imshow(np.transpose(preprocessed_img.reshape(3,224,224),(1,2,0)))
fig_save_path = xp_conf['attr_maps_save_path']+f'/{data_id}/'+'original_img'
plt.savefig(fig_save_path)
attr_maps_arr = []
attr_maps_grid = []
preds = []
for i, seed in enumerate(seeds):
# Sample net
sample_curve_network(model, inv_factors, estimator, posterior_mean, seed)
#Compute attribution map
img = np.transpose(testset[data_id][0], (1,2,0))
tmp_attr_map_arr, tmp_attr_map_grid = compute_attribution_map(img=img, model=model,
cell_image_size=60, n_steps=1024,
n_groups=6, layer='mixed5b',
batch_size=64, device=device)
# Save attribution maps
attr_maps_arr.append(tmp_attr_map_arr)
attr_maps_grid.append(tmp_attr_map_grid)
img = testset[data_id][0].reshape(1, 3, 224, 224)
# Compute network predictions
pred = model(img.reshape(1, 3, 224, 224).to(device)).max(1)[1].detach().cpu().numpy()
preds.append((pred[0], class_to_name[idx_to_class[pred[0]]]))
xp_conf['attr_maps_arr'] = attr_maps_arr
xp_conf['attr_maps_grid'] = attr_maps_grid
xp_conf['preds'] = preds
xp_save_path = xp_conf['attr_maps_save_path']+f'/{data_id}/'+f'xp_dict_'+'_'.join('_'.join(time.ctime().split(' ')).split(':'))
np.save(xp_save_path, xp_conf)
| 35.356061 | 132 | 0.667881 |
4a230bc4631a4c98380a434647a49287aef1599e | 15,351 | py | Python | egs/word_embedding/steps/tfrnnlm/fast_cbow.py | charlesliucn/LanMIT | 537dab22335a21285699776ce66d95cb14c3db52 | [
"Apache-2.0"
] | 17 | 2019-07-08T08:34:50.000Z | 2021-11-19T13:39:29.000Z | egs/word_embedding/steps/tfrnnlm/fast_cbow.py | charlesliucn/kaldi-lm | 537dab22335a21285699776ce66d95cb14c3db52 | [
"Apache-2.0"
] | null | null | null | egs/word_embedding/steps/tfrnnlm/fast_cbow.py | charlesliucn/kaldi-lm | 537dab22335a21285699776ce66d95cb14c3db52 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import sys
import time
import math
import reader
import random
import inspect
import collections
import numpy as np
import tensorflow as tf
reload(sys)
sys.setdefaultencoding("utf-8")
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
session = tf.Session(config = config)
flags = tf.flags
logging = tf.logging
flags.DEFINE_string("data-path", None, "Where the training/test data is stored.")
flags.DEFINE_string("vocab-path", None, "Where the wordlist file is stored.")
flags.DEFINE_string("save-path", None, "Model output directory.")
flags.DEFINE_integer("hidden-size", 200, "hidden dim of RNN")
flags.DEFINE_integer("num-layers", 2, "number of layers of RNN")
flags.DEFINE_integer("batch-size", 64, "batch size of RNN training")
flags.DEFINE_float("keep-prob", 1.0, "Keep Probability of Dropout")
flags.DEFINE_integer("max-epoch", 30, "The number of max epoch")
FLAGS = flags.FLAGS
class Config(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 30
keep_prob = 1.0
lr_decay = 0.8
batch_size = 64
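# Training schedule implied by Config: the learning rate is held at `learning_rate` for the first
# `max_epoch` epochs and is then multiplied by `lr_decay` once per epoch, for `max_max_epoch`
# epochs in total (see the epoch loop in main()).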
# this new "softmax" function we show can train a "self-normalized" RNNLM where
# the sum of the output is automatically (close to) 1.0
# which saves a lot of computation for lattice-rescoring
def new_softmax(labels, logits):
target = tf.reshape(labels, [-1])
f_logits = tf.exp(logits)
row_sums = tf.reduce_sum(f_logits, 1) # this is the negative part of the objf
t2 = tf.expand_dims(target, 1)
range = tf.expand_dims(tf.range(tf.shape(target)[0]), 1)
ind = tf.concat([range, t2], 1)
res = tf.gather_nd(logits, ind)
return -res + row_sums - 1
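# Editor's sketch of the objective above (NumPy-style, illustration only): for one position,
#   logits = np.array([-2.0, -1.5, -2.5]); target = 1
#   loss = -logits[target] + np.exp(logits).sum() - 1.0
# Minimising the sum(exp(logits)) term pushes the normaliser towards 1, so at rescoring time
# the raw logits can be used as log-probabilities without an explicit softmax.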
class RnnlmInput(object):
"""The input data."""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.rnnlm_producer(
data, batch_size, num_steps, name=name)
class RnnlmModel(object):
"""The RNNLM model."""
def __init__(self, is_training, config, input_, cbow_embeddings):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
hidden_size = config.hidden_size
vocab_size = config.vocab_size
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
if 'reuse' in inspect.getargspec(tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
hidden_size, forget_bias = 0.0, state_is_tuple = True,
reuse = tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
hidden_size, forget_bias = 0.0, state_is_tuple = True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob = config.keep_prob)
self.cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)],
state_is_tuple = True)
self._initial_state = self.cell.zero_state(batch_size, tf.float32)
self._initial_state_single = self.cell.zero_state(1, tf.float32)
self.initial = tf.reshape(tf.stack(axis = 0, values = self._initial_state_single),
[config.num_layers, 2, 1, hidden_size], name = "test_initial_state")
# first implement the less efficient version
test_word_in = tf.placeholder(tf.int32, [1, 1], name="test_word_in")
state_placeholder = tf.placeholder(tf.float32,
[config.num_layers, 2, 1, hidden_size], name = "test_state_in")
# unpacking the input state context
l = tf.unstack(state_placeholder, axis=0)
test_input_state = tuple(
[tf.contrib.rnn.LSTMStateTuple(l[idx][0],l[idx][1])
for idx in range(config.num_layers)]
)
with tf.device("/cpu:0"):
embed_init = tf.constant_initializer(cbow_embeddings, dtype = tf.float32)
self.embedding = tf.get_variable("embedding", shape = [vocab_size, hidden_size],
dtype = tf.float32, initializer = embed_init)
inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in)
# test time
with tf.variable_scope("RNN"):
(test_cell_output, test_output_state) = self.cell(test_inputs[:, 0, :], test_input_state)
test_state_out = tf.reshape(tf.stack(axis = 0, values = test_output_state),
[config.num_layers, 2, 1, hidden_size], name = "test_state_out")
test_cell_out = tf.reshape(test_cell_output, [1, hidden_size], name = "test_cell_out")
# above is the first part of the graph for test
# test-word-in
# > ---- > test-state-out
# test-state-in > test-cell-out
# below is the 2nd part of the graph for test
# test-word-out
# > prob(word | test-word-out)
# test-cell-in
test_word_out = tf.placeholder(tf.int32, [1, 1], name = "test_word_out")
cellout_placeholder = tf.placeholder(tf.float32, [1, hidden_size], name = "test_cell_in")
softmax_w = tf.get_variable("softmax_w", [hidden_size, vocab_size], dtype = tf.float32)
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype = tf.float32)
softmax_b = softmax_b - 9.0
test_logits = tf.matmul(cellout_placeholder,
tf.transpose(tf.nn.embedding_lookup(tf.transpose(softmax_w), test_word_out[0]))) + softmax_b[test_word_out[0,0]]
p_word = test_logits[0, 0]
test_out = tf.identity(p_word, name = "test_out")
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.contrib.rnn.static_rnn(
# cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > -1: tf.get_variable_scope().reuse_variables()
(cell_output, state) = self.cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, hidden_size])
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=tf.float32)],
softmax_loss_function = new_softmax)
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
data_index = 0
def generate_batch(train_data, embed_batch_size, skip_window):
global data_index
span = 2 * skip_window + 1
batch = np.ndarray(shape = (embed_batch_size, span - 1), dtype = np.int32)
labels = np.ndarray(shape = (embed_batch_size, 1), dtype = np.int32)
buffer = collections.deque(maxlen = span)
for _ in range(span):
buffer.append(train_data[data_index])
data_index = (data_index + 1) % len(train_data)
for i in range(embed_batch_size):
target = skip_window
targets_to_avoid = [skip_window]
col_idx = 0
for j in range(span):
if j == span // 2:
continue
batch[i, col_idx] = buffer[j]
col_idx += 1
labels[i, 0] = buffer[target]
buffer.append(train_data[data_index])
data_index = (data_index + 1) % len(train_data)
assert batch.shape[0] == embed_batch_size and batch.shape[1] == span - 1
return batch, labels
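# Editor's illustration: with skip_window=1 (span of 3) each row of `batch` holds the two context
# word ids around a centre word and `labels` holds the centre word itself, e.g. for word ids
# [5, 8, 2, 7, ...] the first training pair is batch=[5, 2] -> label 8, the next is [8, 7] -> 2.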
def get_config():
return Config()
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to RNNLM data directory")
raw_data = reader.rnnlm_raw_data(FLAGS.data_path, FLAGS.vocab_path)
train_data, valid_data, _, word_map = raw_data
reverse_wordmap = dict(zip(word_map.values(), word_map.keys()))
vocabulary_size = len(word_map)
config = get_config()
config.vocab_size = len(word_map)
config.hidden_size = FLAGS.hidden_size
config.num_layers = FLAGS.num_layers
config.batch_size = FLAGS.batch_size
config.keep_prob= FLAGS.keep_prob
config.max_max_epoch = FLAGS.max_epoch
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
  # word embedding (CBOW) parameter settings
embed_batch_size = 128
embedding_size = 200
skip_window = 1
valid_size = 16
valid_window = 100
embed_num_steps = 100001
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(
valid_examples,
random.sample(range(1000, 1000 + valid_window),valid_size // 2))
num_sampled = 64
graph_cbow = tf.Graph()
with graph_cbow.as_default():
train_dataset = tf.placeholder(tf.int32, shape = [embed_batch_size, 2 * skip_window])
train_labels = tf.placeholder(tf.int32, shape = [embed_batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev = 1.0/math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embeds = None
for i in range(2 * skip_window):
embedding_i = tf.nn.embedding_lookup(embeddings, train_dataset[:, i])
print("embedding %d shape: %s" % (i, embedding_i.get_shape().as_list()))
emb_x, emb_y = embedding_i.get_shape().as_list()
if embeds is None:
embeds = tf.reshape(embedding_i, [emb_x, emb_y, 1])
else:
embeds = tf.concat([embeds, tf.reshape(embedding_i, [emb_x, emb_y, 1])], axis = 2)
# assert embeds.get_shape().as_list()[2] == 2 * skip_window
print("Concat embedding size: %s" % embeds.get_shape().as_list())
avg_embed = tf.reduce_mean(embeds, 2, keep_dims = False)
print("Average embedding size: %s" % avg_embed.get_shape().as_list())
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
weights = softmax_weights,
biases = softmax_biases,
inputs = avg_embed,
labels = train_labels,
num_sampled = num_sampled,
num_classes = vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
with tf.Session(graph = graph_cbow) as session:
tf.global_variables_initializer().run()
print("Initialized!")
average_loss = 0
for step in range(embed_num_steps):
batch_data, batch_labels = generate_batch(
train_data = train_data,
embed_batch_size = embed_batch_size,
skip_window = skip_window)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, lo = session.run([optimizer, loss], feed_dict = feed_dict)
average_loss += lo
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
print("Average loss at step %d: %f " % (step, average_loss))
average_loss = 0
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_wordmap[valid_examples[i]]
top_k = 8
nearest = (-sim[i, :]).argsort()[1: top_k + 1]
log = "Nearest to %s: " % valid_word
for k in range(top_k):
close_word = reverse_wordmap[nearest[k]]
log = log + " " + close_word + ","
print(log)
final_embeddings = normalized_embeddings.eval()
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.name_scope("Train"):
train_input = RnnlmInput(config = config, data = train_data, name = "TrainInput")
with tf.variable_scope("Model", reuse = None, initializer = initializer):
m = RnnlmModel(is_training = True, config = config, input_ = train_input,
cbow_embeddings = final_embeddings)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = RnnlmInput(config = config, data = valid_data, name = "ValidInput")
with tf.variable_scope("Model", reuse = True, initializer = initializer):
mvalid = RnnlmModel(is_training = False, config = config, input_ = valid_input,
cbow_embeddings = final_embeddings)
tf.summary.scalar("Validation Loss", mvalid.cost)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path)
if __name__ == "__main__":
tf.app.run()
| 33.962389 | 115 | 0.712787 |
4a230c50923f6defb1f39ad8b9bf8e41a7288ba8 | 49,485 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/httplib.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/httplib.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/httplib.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | """HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|
| response = getresponse()
v
Unread-response [Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
from array import array
import os
import socket
from sys import py3kwarning
from urlparse import urlsplit
import warnings
with warnings.catch_warnings():
if py3kwarning:
warnings.filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
class HTTPMessage(mimetools.Message):
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
if buffering:
# The caller won't be using any sock.recv() calls, so buffering
# is fine and recommended for performance.
self.fp = sock.makefile('rb')
else:
# The buffer size is specified as zero, because the headers of
# the response are read with readline(). If the reads were
# buffered the readline() calls could consume some of the
            # response, which may be read via a recv() on the underlying
# socket.
self.fp = sock.makefile('rb', 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults
line = self.fp.readline()
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith('HTTP/'):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# assume it's a Simple-Response from an 0.9 server
self.fp = LineAndFileWrapper(line, self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print "header:", skip
self.status = status
self.reason = reason.strip()
if version == 'HTTP/1.0':
self.version = 10
elif version.startswith('HTTP/1.'):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == 'HTTP/0.9':
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = HTTPMessage(StringIO())
return
self.msg = HTTPMessage(self.fp, 0)
if self.debuglevel > 0:
for hdr in self.msg.headers:
print "header:", hdr,
# don't let the msg keep an fp
self.msg.fp = None
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.getheader('transfer-encoding')
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
length = self.msg.getheader('content-length')
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == 'HEAD'):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if not self.will_close and \
not self.chunked and \
self.length is None:
self.will_close = 1
def _check_close(self):
conn = self.msg.getheader('connection')
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.msg.getheader('connection')
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.getheader('keep-alive'):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.getheader('proxy-connection')
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
s = self._safe_read(self.length)
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
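        # Chunked transfer coding (RFC 2616 sec 3.6.1): the body arrives as a sequence of
        # "<hex-size>[; extensions]\r\n<data>\r\n" chunks, terminated by a zero-size chunk and an
        # optional trailer, which is what the loop below parses.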
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = []
while True:
if chunk_left is None:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(''.join(value))
if chunk_left == 0:
break
if amt is None:
value.append(self._safe_read(chunk_left))
elif amt < chunk_left:
value.append(self._safe_read(amt))
self.chunk_left = chunk_left - amt
return ''.join(value)
elif amt == chunk_left:
value.append(self._safe_read(amt))
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return ''.join(value)
else:
value.append(self._safe_read(chunk_left))
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return ''.join(value)
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
# NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
# return less than x bytes unless EOF is encountered. It now handles
# signal interruptions (socket.error EINTR) internally. This code
# never caught that exception anyways. It seems largely pointless.
# self.fp.read(amt) will work fine.
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return ''.join(s)
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
self._set_hostport(host, port)
if strict is not None:
self.strict = strict
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
self._set_hostport(self._tunnel_host, self._tunnel_port)
self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
response = self.response_class(self.sock, strict = self.strict,
method = self._method)
(version, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if line == '\r\n': break
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket.create_connection((self.host,self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, data):
"""Send `data' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print "send:", repr(data)
blocksize = 8192
if hasattr(data,'read') and not isinstance(data, array):
if self.debuglevel > 0: print "sendIng a read()able"
datablock = data.read(blocksize)
while datablock:
self.sock.sendall(datablock)
datablock = data.read(blocksize)
else:
self.sock.sendall(data)
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
#message_body was not a string (i.e. it is a file) and
#we must run the risk of Nagle
self.send(message_body)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
hdr = '%s %s %s' % (method, url, self._http_vsn_str)
self._output(hdr)
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
try:
host_enc = self.host.encode("ascii")
except UnicodeEncodeError:
host_enc = self.host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
if self.port == self.default_port:
self.putheader('Host', host_enc)
else:
self.putheader('Host', "%s:%s" % (host_enc, self.port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
hdr = '%s: %s' % (header, '\r\n\t'.join([str(v) for v in values]))
self._output(hdr)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError, te:
# If this is a file-like object, try to
# fstat its file descriptor
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print "Cannot stat!!"
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
self.endheaders(body)
def getresponse(self, buffering=False):
"Get the response from the server."
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
args = (self.sock,)
kwds = {"strict":self.strict, "method":self._method}
if self.debuglevel > 0:
args += (self.debuglevel,)
if buffering:
#only add this keyword if non-default, for compatibility with
#other response_classes.
kwds["buffering"] = True;
response = self.response_class(*args, **kwds)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
class HTTP:
"Compatibility class with httplib.py from 1.5."
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.putheader = conn.putheader
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
if host is not None:
self._conn._set_hostport(host, port)
self._conn.connect()
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
try:
if not buffering:
response = self._conn.getresponse()
else:
#only add this keyword if non-default for compatibility
#with other connection classes
response = self._conn.getresponse(buffering)
except BadStatusLine, e:
### hmm. if getresponse() ever closes the socket on a bad request,
### then we are going to have problems with self.sock
### should we keep this behavior? do people use it?
# keep the socket open (as a file), and return it
self.file = self._conn.sock.makefile('rb', 0)
# close our socket -- we want to restart after any protocol error
self.close()
self.headers = None
return -1, e.line, None
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
HTTPConnection.__init__(self, host, port, strict, timeout,
source_address)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
__all__.append("HTTPSConnection")
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None):
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
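# Rough sketch of what LineAndFileWrapper does for an HTTP/0.9 reply (hypothetical
# values, comments only): the status-line parser has already consumed the first body
# line, so the wrapper replays that line before delegating to the real socket file.
#
#     wrapped = LineAndFileWrapper('<html>\n', sock_file)
#     wrapped.readline()   # -> '<html>\n'  (the replayed line)
#     wrapped.readline()   # -> next line read directly from sock_file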
def test():
"""Test this module.
A hodge podge of tests collected here, because they have too many
external dependencies for the regular test suite.
"""
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'd')
dl = 0
for o, a in opts:
if o == '-d': dl = dl + 1
host = 'www.python.org'
selector = '/'
if args[0:]: host = args[0]
if args[1:]: selector = args[1]
h = HTTP()
h.set_debuglevel(dl)
h.connect(host)
h.putrequest('GET', selector)
h.endheaders()
status, reason, headers = h.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(h.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
# minimal test that code to extract host from url works
class HTTP11(HTTP):
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
h = HTTP11('www.python.org')
h.putrequest('GET', 'http://www.python.org/~jeremy/')
h.endheaders()
h.getreply()
h.close()
try:
import ssl
except ImportError:
pass
else:
for host, selector in (('sourceforge.net', '/projects/python'),
):
print "https://%s%s" % (host, selector)
hs = HTTPS()
hs.set_debuglevel(dl)
hs.connect(host)
hs.putrequest('GET', selector)
hs.endheaders()
status, reason, headers = hs.getreply()
print 'status =', status
print 'reason =', reason
print "read", len(hs.getfile().read())
print
if headers:
for header in headers.headers: print header.strip()
print
if __name__ == '__main__':
test()
| 35.524049 | 86 | 0.558472 |
4a230cb443d1a97b9a69bd2a7164f3612c1e9ecc | 1,077 | py | Python | tests/config/testing.py | medecau/lamson | e78520b857384462b9eecdedfc0f8c2e57cdd00a | [
"BSD-3-Clause"
] | null | null | null | tests/config/testing.py | medecau/lamson | e78520b857384462b9eecdedfc0f8c2e57cdd00a | [
"BSD-3-Clause"
] | null | null | null | tests/config/testing.py | medecau/lamson | e78520b857384462b9eecdedfc0f8c2e57cdd00a | [
"BSD-3-Clause"
] | null | null | null | from config import settings
from lamson import view
from lamson.routing import Router
from lamson.server import Relay
import jinja2
import logging
import logging.config
import os
# configure logging to go to a log file
logging.config.fileConfig("tests/config/logging.conf")
# the relay host to actually send the final message to (set debug=1 to see what
# the relay is saying to the log server).
settings.relay = Relay(host=settings.relay_config['host'],
port=settings.relay_config['port'], debug=0)
settings.receiver = None
Router.defaults(**settings.router_defaults)
Router.load(settings.handlers + settings.queue_handlers)
Router.RELOAD=False
Router.LOG_EXCEPTIONS=False
view.LOADER = jinja2.Environment(loader=jinja2.PackageLoader('lamson_tests', 'templates'))
# if you have pyenchant and enchant installed then the template tests will do
# spell checking for you, but you need to tell pyenchant where to find itself
if 'PYENCHANT_LIBRARY_PATH' not in os.environ:
os.environ['PYENCHANT_LIBRARY_PATH'] = '/opt/local/lib/libenchant.dylib'
| 32.636364 | 90 | 0.777159 |
4a230ccc50f3eb4aab305c414d6ec079c65b6692 | 2,185 | py | Python | one_app/img/app (1).py | wikucha/Alfik | 541124dbe1c458fac54da65a80d9577a9abf3954 | [
"MIT"
] | 1 | 2018-05-17T10:26:51.000Z | 2018-05-17T10:26:51.000Z | one_app/img/app (1).py | wikucha/Alfik | 541124dbe1c458fac54da65a80d9577a9abf3954 | [
"MIT"
] | 21 | 2018-01-26T14:47:43.000Z | 2018-06-08T06:56:17.000Z | one_app/img/app (1).py | wikucha/Alfik | 541124dbe1c458fac54da65a80d9577a9abf3954 | [
"MIT"
] | 1 | 2021-01-20T18:09:54.000Z | 2021-01-20T18:09:54.000Z | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.graphics import Color, Rectangle
from kivy.core.window import Window
from learn import LearnScreen
from memory import MemoryScreen
from fon_quiz import FonquizScreen
from tools import load_lang, load_ulubione
Builder.load_file("app.kv")
# Ekran menu (wygląd wczytany z app.kv)
class MenuScreen(Screen):
pass
# Ekran z ustawieniami
class SettingsScreen(Screen):
pass
# Główna aplikacja zarządzająca grami
class MainApp(App):
def current_lang():
doc = "Obsluga wczytywania jezyka i ulubionych"
def fget(self):
return self._current_lang
def fset(self, value):
fname_dict = "lang/%s/config.py" % (value)
fname_ulubione = "ulubione/%s_ulubione.pickle" % (value)
self._current_lang = fname_dict
self.current_lang_ulubione_file = fname_ulubione
self.current_lang_dict = load_lang(fname_dict)
self.current_lang_ulubione = load_ulubione(fname_ulubione)
def fdel(self):
del self._current_lang
return locals()
current_lang = property(**current_lang())
def build(self):
self._current_lang = None
self.current_lang_ulubione_file = None
self.current_lang_dict = None
self.current_lang_ulubione = None
sm = ScreenManager()
# Ustawienie tła
with sm.canvas:
Color(1, 1, 1)
rect = Rectangle(source="img/tlo.png", size=Window.size)
# Skalowanie obrazka przy zmianie wymiarów okna (orientacji telefonu)
def resize_action(size, pos):
sm.size = Window.size
rect.size = Window.size
sm.bind(pos=resize_action, size=resize_action)
# Aplikacje którymi zarządzamy
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(LearnScreen(name='game'))
sm.add_widget(MemoryScreen(name='memory'))
sm.add_widget(FonquizScreen(name='fonquiz'))
sm.add_widget(SettingsScreen(name='settings'))
return sm
if __name__ == '__main__':
MainApp().run()
| 29.931507 | 77 | 0.667735 |
4a230d265f8e43711e49f539fbc194d7c430039c | 1,087 | py | Python | HausdorffDistance/ROICoordinate.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | 1 | 2021-05-29T22:48:49.000Z | 2021-05-29T22:48:49.000Z | HausdorffDistance/ROICoordinate.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | null | null | null | HausdorffDistance/ROICoordinate.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Get the ROI point coordinates according to the ROI geometry type
def RoiCoord(structure_set,x):
    # Each structure set holds multiple ROIGeometries; a specific ROI can be accessed as RoiGeometries[0], [1] or RoiGeometries['ROI name']
roi_geometries=structure_set.RoiGeometries[x]
    # Contours holds all point coordinates (DICOM format) that make up the ROI outline.
    # len(Contours) is the number of CT slices it spans; it is a list accessed as Contours[0], [1], ...
    # For one CT slice, len(Contours[0]) is the number of points, accessed as Contours[0][0], [0][1], ...
    # Each point has len 3; use Contours[0][0].x, .y, .z to get the coordinates.
c1=[]
if hasattr(roi_geometries.PrimaryShape,'Contours'):
contour=roi_geometries.PrimaryShape.Contours
        # Collect the coordinates of every point; the result is a list
for i in range(len(contour)):
for j in range(len(contour[i])):
c1.append([contour[i][j].x,contour[i][j].y,contour[i][j].z])
elif hasattr(roi_geometries.PrimaryShape,'Vertices'):
contour=roi_geometries.PrimaryShape.Vertices
        # len(Vertices) is the number of points in the ROI; it is a list accessed as Vertices[0], [1], ...
        # Each point has len 3; use Vertices[0].x, .y, .z to get the coordinates.
for i in range(len(contour)):
c1.append([contour[i].x,contour[i].y,contour[i].z])
else:
print 'type of ROI is not matched'
return c1 | 43.48 | 81 | 0.687213 |
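# Illustrative call sketch (assumes a RayStation scripting session; the case and
# ROI names below are hypothetical, not part of this script):
#
#     from connect import get_current
#     structure_set = get_current("Case").PatientModel.StructureSets['CT 1']
#     points = RoiCoord(structure_set, 'PTV')   # list of [x, y, z] coordinates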
4a230db78c41cce0d8f52d733e02dacbb0a81496 | 26,538 | py | Python | tensorforce/models/memory_model.py | hcarlens/tensorforce | e28898c6a07c2d4b4245008dee7a5324f0530cbe | [
"Apache-2.0"
] | null | null | null | tensorforce/models/memory_model.py | hcarlens/tensorforce | e28898c6a07c2d4b4245008dee7a5324f0530cbe | [
"Apache-2.0"
] | null | null | null | tensorforce/models/memory_model.py | hcarlens/tensorforce | e28898c6a07c2d4b4245008dee7a5324f0530cbe | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.core.memories import Memory
from tensorforce.core.optimizers import Optimizer
from tensorforce.models import Model
class MemoryModel(Model):
"""
A memory model is a generic model to accumulate and sample data.
Child classes need to implement the following methods:
- `tf_loss_per_instance(states, internals, actions, terminal, reward)` returning the loss
per instance for a batch.
- `tf_regularization_losses(states, internals)` returning a dict of regularization losses.
"""
def __init__(
self,
states,
actions,
scope,
device,
saver,
summarizer,
execution,
batching_capacity,
variable_noise,
states_preprocessing,
actions_exploration,
reward_preprocessing,
update_mode,
memory,
optimizer,
discount
):
"""
Memory model.
Args:
states (spec): The state-space description dictionary.
actions (spec): The action-space description dictionary.
scope (str): The root scope str to use for tf variable scoping.
device (str): The name of the device to run the graph of this model on.
saver (spec): Dict specifying whether and how to save the model's parameters.
summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.
execution (spec): Dict specifying whether and how to do distributed training on the model's graph.
batching_capacity (int): Batching capacity.
variable_noise (float): The stddev value of a Normal distribution used for adding random
noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).
Use None for not adding any noise.
states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals
(e.g. normalization, greyscale, etc..).
actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's
"action outputs" (e.g. epsilon-greedy).
reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming
from the Environment (e.g. reward normalization).
update_mode (spec): Update mode.
memory (spec): Memory.
optimizer (spec): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.
discount (float): The RL reward discount factor (gamma).
"""
self.update_mode = update_mode
self.memory_spec = memory
self.optimizer_spec = optimizer
# Discount
assert discount is None or discount >= 0.0
self.discount = discount
self.memory = None
self.optimizer = None
self.fn_discounted_cumulative_reward = None
self.fn_reference = None
self.fn_loss_per_instance = None
self.fn_regularization_losses = None
self.fn_loss = None
self.fn_optimization = None
self.fn_import_experience = None
super(MemoryModel, self).__init__(
states=states,
actions=actions,
scope=scope,
device=device,
saver=saver,
summarizer=summarizer,
execution=execution,
batching_capacity=batching_capacity,
variable_noise=variable_noise,
states_preprocessing=states_preprocessing,
actions_exploration=actions_exploration,
reward_preprocessing=reward_preprocessing
)
def as_local_model(self):
"""
Makes sure our optimizer is wrapped into the global_optimizer meta. This is only relevant for distributed RL.
"""
super(MemoryModel, self).as_local_model()
self.optimizer_spec = dict(
type='global_optimizer',
optimizer=self.optimizer_spec
)
def setup_components_and_tf_funcs(self, custom_getter=None):
"""
Constructs the memory and the optimizer objects.
Generates and stores all template functions.
"""
custom_getter = super(MemoryModel, self).setup_components_and_tf_funcs(custom_getter)
# Memory
self.memory = Memory.from_spec(
spec=self.memory_spec,
kwargs=dict(
states=self.states_spec,
internals=self.internals_spec,
actions=self.actions_spec,
summary_labels=self.summary_labels
)
)
# Optimizer
self.optimizer = Optimizer.from_spec(
spec=self.optimizer_spec,
kwargs=dict(summary_labels=self.summary_labels)
)
# TensorFlow functions
self.fn_discounted_cumulative_reward = tf.make_template(
name_='discounted-cumulative-reward',
func_=self.tf_discounted_cumulative_reward,
custom_getter_=custom_getter
)
self.fn_reference = tf.make_template(
name_='reference',
func_=self.tf_reference,
custom_getter_=custom_getter
)
self.fn_loss_per_instance = tf.make_template(
name_='loss-per-instance',
func_=self.tf_loss_per_instance,
custom_getter_=custom_getter
)
self.fn_regularization_losses = tf.make_template(
name_='regularization-losses',
func_=self.tf_regularization_losses,
custom_getter_=custom_getter
)
self.fn_loss = tf.make_template(
name_='loss',
func_=self.tf_loss,
custom_getter_=custom_getter
)
self.fn_optimization = tf.make_template(
name_='optimization',
func_=self.tf_optimization,
custom_getter_=custom_getter
)
self.fn_import_experience = tf.make_template(
name_='import-experience',
func_=self.tf_import_experience,
custom_getter_=custom_getter
)
return custom_getter
def tf_initialize(self):
"""
Also initializes our Memory object (self.memory).
"""
super(MemoryModel, self).tf_initialize()
self.memory.initialize()
#def tf_discounted_cumulative_reward(self, terminal, reward, discount, final_reward=0.0):
# """
# Creates the TensorFlow operations for calculating the discounted cumulative rewards
# for a given sequence of rewards.
# Args:
# terminal: Terminal boolean tensor.
# reward: Reward tensor.
# discount: Discount factor.
# final_reward: Last reward value in the sequence.
# Returns:
# Discounted cumulative reward tensor.
# """
# # TODO: n-step cumulative reward (particularly for envs without terminal)
# def cumulate(cumulative, reward_and_terminal):
# rew, term = reward_and_terminal
# return tf.where(condition=term, x=rew, y=(rew + cumulative * discount))
# # Reverse since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right
# reward = tf.reverse(tensor=reward, axis=(0,))
# terminal = tf.reverse(tensor=terminal, axis=(0,))
# reward = tf.scan(fn=cumulate, elems=(reward, terminal), initializer=tf.stop_gradient(input=final_reward))
# return tf.reverse(tensor=reward, axis=(0,))
# TODO: could be a utility helper function if we remove self.discount and only allow external discount-value input
def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=0.0, horizon=0):
"""
Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards
for a given sequence of single rewards.
Example:
single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0
terminal = False, False, False, False True False
gamma = 0.95
final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)
horizon=3
output = 2.95 1.45 1.38 1.45 1.0 94.0
Args:
terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one
True value. If its very last element is False (not terminating), the given `final_reward` value
is assumed to follow the last value in the single rewards sequence (see below).
reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,
an assumed last reward of the value of `final_reward` will be used.
discount (float): The discount factor (gamma). By default, take the Model's discount factor.
final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence
ends with False). This value will be ignored if horizon == 1 or discount == 0.0.
horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks
without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the
exact same results as a discount factor of 0.0.
Returns:
Discounted cumulative reward tensor with the same shape as `reward`.
"""
# By default -> take Model's gamma value
if discount is None:
discount = self.discount
# Accumulates discounted (n-step) reward (start new if terminal)
def cumulate(cumulative, reward_terminal_horizon_subtract):
rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract
return tf.where(
# If terminal, start new cumulation.
condition=is_terminal,
x=rew,
y=tf.where(
# If we are above the horizon length (H) -> subtract discounted value from H steps back.
condition=is_over_horizon,
x=(rew + cumulative * discount - sub),
y=(rew + cumulative * discount)
)
)
# Accumulates length of episodes (starts new if terminal)
def len_(cumulative, term):
return tf.where(
condition=term,
# Start counting from 1 after is-terminal signal
x=tf.ones(shape=(), dtype=tf.int32),
# Otherwise, increase length by 1
y=cumulative + 1
)
# Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right.
reward = tf.reverse(tensor=reward, axis=(0,))
# e.g. -1.0 1.0 0.5 0.0 1.0 2.0
terminal = tf.reverse(tensor=terminal, axis=(0,))
# e.g. F T F F F F
# Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count).
lengths = tf.scan(fn=len_, elems=terminal, initializer=0)
# e.g. 1 1 2 3 4 5
off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))
# e.g. F F F F T T
# Calculate the horizon-subtraction value for each step.
if horizon > 0:
horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)
# Shift right by size of horizon (fill rest with 0.0).
            horizon_subtractions = tf.concat([tf.zeros(shape=(horizon,)), horizon_subtractions], axis=0)  # tf.zeros keeps dtype consistent; numpy is not imported in this module
horizon_subtractions = tf.slice(horizon_subtractions, begin=(0,), size=tf.shape(reward))
# e.g. 0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3
# all 0.0 if infinite horizon (special case: horizon=0)
else:
horizon_subtractions = tf.zeros(shape=tf.shape(reward))
# Now do the scan, each time summing up the previous step (discounted by gamma) and
# subtracting the respective `horizon_subtraction`.
reward = tf.scan(
fn=cumulate,
elems=(reward, terminal, off_horizon, horizon_subtractions),
initializer=final_reward if horizon != 1 else 0.0
)
# Re-reverse again to match input sequences.
return tf.reverse(tensor=reward, axis=(0,))
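    # Plain-Python sketch of the cumulation above (for intuition only, infinite
    # horizon case): walk the rewards right-to-left, restart at terminals and
    # decay by `discount`:
    #
    #     cumulative = final_reward
    #     out = []
    #     for r, t in zip(reversed(rewards), reversed(terminals)):
    #         cumulative = r if t else r + discount * cumulative
    #         out.append(cumulative)
    #     out.reverse()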
def tf_reference(self, states, internals, actions, terminal, reward, next_states, next_internals, update):
"""
Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a
comparative loss.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Reference tensor(s).
"""
return None
def tf_loss_per_instance(self, states, internals, actions, terminal, reward,
next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor.
"""
raise NotImplementedError
def tf_regularization_losses(self, states, internals, update):
"""
Creates the TensorFlow operations for calculating the regularization losses for the given input states.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
Returns:
Dict of regularization loss tensors.
"""
return dict()
def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
"""
Creates the TensorFlow operations for calculating the full loss of a batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss tensor.
"""
# Mean loss per instance
loss_per_instance = self.fn_loss_per_instance(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=update,
reference=reference
)
# Returns no-op.
updated = self.memory.update_batch(loss_per_instance=loss_per_instance)
with tf.control_dependencies(control_inputs=(updated,)):
loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=0)
# Loss without regularization summary.
if 'losses' in self.summary_labels:
tf.contrib.summary.scalar(name='loss-without-regularization', tensor=loss)
# Regularization losses.
losses = self.fn_regularization_losses(states=states, internals=internals, update=update)
if len(losses) > 0:
loss += tf.add_n(inputs=[losses[name] for name in sorted(losses)])
if 'regularization' in self.summary_labels:
for name in sorted(losses):
tf.contrib.summary.scalar(name=('regularization/' + name), tensor=losses[name])
# Total loss summary.
if 'losses' in self.summary_labels or 'total-loss' in self.summary_labels:
tf.contrib.summary.scalar(name='total-loss', tensor=loss)
return loss
def optimizer_arguments(self, states, internals, actions, terminal, reward, next_states, next_internals):
"""
Returns the optimizer arguments including the time, the list of variables to optimize,
and various functions which the optimizer might require to perform an update step.
Args:
states (dict): Dict of state tensors.
internals (dict): Dict of prior internal state tensors.
actions (dict): Dict of action tensors.
terminal: 1D boolean is-terminal tensor.
reward: 1D (float) rewards tensor.
next_states (dict): Dict of successor state tensors.
next_internals (dict): Dict of posterior internal state tensors.
Returns:
Optimizer arguments as dict to be used as **kwargs to the optimizer.
"""
arguments = dict(
time=self.global_timestep,
variables=self.get_variables(),
arguments=dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals,
update=tf.constant(value=True)
),
fn_reference=self.fn_reference,
fn_loss=self.fn_loss
)
if self.global_model is not None:
arguments['global_variables'] = self.global_model.get_variables()
return arguments
def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):
"""
Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
The optimization operation.
"""
arguments = self.optimizer_arguments(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
return self.optimizer.minimize(**arguments)
def tf_observe_timestep(self, states, internals, actions, terminal, reward):
"""
Creates and returns the op that - if frequency condition is hit - pulls a batch from the memory
and does one optimization step.
"""
# Store timestep in memory
stored = self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
# Periodic optimization
with tf.control_dependencies(control_inputs=(stored,)):
unit = self.update_mode['unit']
batch_size = self.update_mode['batch_size']
frequency = self.update_mode.get('frequency', batch_size)
first_update = self.update_mode.get('first_update', 0)
if unit == 'timesteps':
# Timestep-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.timestep, y=batch_size),
y=tf.greater_equal(x=self.timestep, y=first_update)
)
)
batch = self.memory.retrieve_timesteps(n=batch_size)
elif unit == 'episodes':
# Episode-based batch
optimize = tf.logical_and(
x=tf.equal(x=(self.episode % frequency), y=0),
y=tf.logical_and(
# Only update once per episode increment.
x=tf.greater(x=tf.count_nonzero(input_tensor=terminal), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.episode, y=batch_size),
y=tf.greater_equal(x=self.episode, y=first_update)
)
)
)
batch = self.memory.retrieve_episodes(n=batch_size)
elif unit == 'sequences':
# Timestep-sequence-based batch
sequence_length = self.update_mode.get('length', 8)
optimize = tf.logical_and(
x=tf.equal(x=(self.timestep % frequency), y=0),
y=tf.logical_and(
x=tf.greater_equal(x=self.timestep, y=(batch_size + sequence_length - 1)),
y=tf.greater_equal(x=self.timestep, y=first_update)
)
)
batch = self.memory.retrieve_sequences(n=batch_size, sequence_length=sequence_length)
else:
raise TensorForceError("Invalid update unit: {}.".format(unit))
# Do not calculate gradients for memory-internal operations.
batch = util.map_tensors(
fn=(lambda tensor: tf.stop_gradient(input=tensor)),
tensors=batch
)
def true_fn():
optimize = self.fn_optimization(**batch)
with tf.control_dependencies(control_inputs=(optimize,)):
return tf.logical_and(x=True, y=True)
return tf.cond(pred=optimize, true_fn=true_fn, false_fn=tf.no_op)
def tf_import_experience(self, states, internals, actions, terminal, reward):
"""
Imports experiences into the TensorFlow memory structure. Can be used to import
off-policy data.
        Args:
            states: Dict of state values to import, keyed by state name.
            internals: Internal values to set; can be fetched from the agent via agent.current_internals
                if no values are available.
            actions: Dict of action values to import, keyed by action name.
            terminal: Terminal value(s).
            reward: Reward value(s).
"""
return self.memory.store(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
def create_operations(self, states, internals, actions, terminal, reward, deterministic, independent):
# Import experience operation.
self.import_experience_output = self.fn_import_experience(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
super(MemoryModel, self).create_operations(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
deterministic=deterministic,
independent=independent
)
def get_variables(self, include_submodules=False, include_nontrainable=False):
model_variables = super(MemoryModel, self).get_variables(
include_submodules=include_submodules,
include_nontrainable=include_nontrainable
)
if include_nontrainable:
memory_variables = self.memory.get_variables()
model_variables += memory_variables
optimizer_variables = self.optimizer.get_variables()
# For some reason, some optimizer variables are only registered in the model.
for variable in optimizer_variables:
if variable in model_variables:
model_variables.remove(variable)
model_variables += optimizer_variables
return model_variables
def import_experience(self, states, internals, actions, terminal, reward):
"""
Stores experiences.
"""
fetches = self.import_experience_output
feed_dict = self.get_feed_dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
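    # Illustrative call pattern for importing off-policy data (argument values are
    # assumptions, not part of this class):
    #
    #     model.import_experience(states=batch_states, internals=[], actions=batch_actions,
    #                             terminal=batch_terminal, reward=batch_reward)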
| 41.336449 | 121 | 0.612593 |
4a230f2c152b6cd81c2fba8a78397490d0b2f074 | 20,499 | py | Python | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | [
"Apache-2.0"
] | 2 | 2021-09-21T07:36:20.000Z | 2022-02-05T15:29:04.000Z | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | [
"Apache-2.0"
] | null | null | null | nemo/collections/nlp/models/intent_slot_classification/intent_slot_classification_model.py | PatrykNeubauer/NeMo | 3ada744b884dba5f233f22c6991fc6092c6ca8d0 | [
"Apache-2.0"
] | 12 | 2021-06-20T08:56:10.000Z | 2022-03-16T19:07:10.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional
import onnx
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss
from nemo.collections.nlp.data.intent_slot_classification import (
IntentSlotClassificationDataset,
IntentSlotDataDesc,
IntentSlotInferenceDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.collections.nlp.modules.common import SequenceTokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.parts.utils_funcs import tensor2list
from nemo.core.classes import typecheck
from nemo.core.classes.common import PretrainedModelInfo
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
class IntentSlotClassificationModel(NLPModel):
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return self.classifier.output_types
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
""" Initializes BERT Joint Intent and Slot model.
"""
self.max_seq_length = cfg.language_model.max_seq_length
# Setup tokenizer.
self.setup_tokenizer(cfg.tokenizer)
# Check the presence of data_dir.
if not cfg.data_dir or not os.path.exists(cfg.data_dir):
# Disable setup methods.
IntentSlotClassificationModel._set_model_restore_state(is_being_restored=True)
# Set default values of data_desc.
self._set_defaults_data_desc(cfg)
else:
self.data_dir = cfg.data_dir
# Update configuration of data_desc.
self._set_data_desc_to_cfg(cfg, cfg.data_dir, cfg.train_ds, cfg.validation_ds)
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
# Enable setup methods.
IntentSlotClassificationModel._set_model_restore_state(is_being_restored=False)
# Initialize Bert model
self.bert_model = get_lm_model(
pretrained_model_name=self.cfg.language_model.pretrained_model_name,
config_file=self.register_artifact('language_model.config_file', cfg.language_model.config_file),
config_dict=OmegaConf.to_container(self.cfg.language_model.config)
if self.cfg.language_model.config
else None,
checkpoint_file=self.cfg.language_model.lm_checkpoint,
vocab_file=self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file),
)
# Initialize Classifier.
self._reconfigure_classifier()
def _set_defaults_data_desc(self, cfg):
"""
Method makes sure that cfg.data_desc params are set.
        If not, sets them to "dummy" defaults.
"""
if not hasattr(cfg, "data_desc"):
OmegaConf.set_struct(cfg, False)
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = " "
cfg.data_desc.intent_label_ids = {" ": 0}
cfg.data_desc.intent_weights = [1]
# Slots.
cfg.data_desc.slot_labels = " "
cfg.data_desc.slot_label_ids = {" ": 0}
cfg.data_desc.slot_weights = [1]
cfg.data_desc.pad_label = "O"
OmegaConf.set_struct(cfg, True)
def _set_data_desc_to_cfg(self, cfg, data_dir, train_ds, validation_ds):
""" Method creates IntentSlotDataDesc and copies generated values to cfg.data_desc. """
# Save data from data desc to config - so it can be reused later, e.g. in inference.
data_desc = IntentSlotDataDesc(data_dir=data_dir, modes=[train_ds.prefix, validation_ds.prefix])
OmegaConf.set_struct(cfg, False)
if not hasattr(cfg, "data_desc") or cfg.data_desc is None:
cfg.data_desc = {}
# Intents.
cfg.data_desc.intent_labels = list(data_desc.intents_label_ids.keys())
cfg.data_desc.intent_label_ids = data_desc.intents_label_ids
cfg.data_desc.intent_weights = data_desc.intent_weights
# Slots.
cfg.data_desc.slot_labels = list(data_desc.slots_label_ids.keys())
cfg.data_desc.slot_label_ids = data_desc.slots_label_ids
cfg.data_desc.slot_weights = data_desc.slot_weights
cfg.data_desc.pad_label = data_desc.pad_label
# for older(pre - 1.0.0.b3) configs compatibility
if not hasattr(cfg, "class_labels") or cfg.class_labels is None:
cfg.class_labels = {}
cfg.class_labels = OmegaConf.create(
{'intent_labels_file': 'intent_labels.csv', 'slot_labels_file': 'slot_labels.csv'}
)
slot_labels_file = os.path.join(data_dir, cfg.class_labels.slot_labels_file)
intent_labels_file = os.path.join(data_dir, cfg.class_labels.intent_labels_file)
self._save_label_ids(data_desc.slots_label_ids, slot_labels_file)
self._save_label_ids(data_desc.intents_label_ids, intent_labels_file)
self.register_artifact('class_labels.intent_labels_file', intent_labels_file)
self.register_artifact('class_labels.slot_labels_file', slot_labels_file)
OmegaConf.set_struct(cfg, True)
def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None:
""" Saves label ids map to a file """
with open(filename, 'w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
logging.info(f'Labels: {label_ids}')
logging.info(f'Labels mapping saved to : {out.name}')
def _reconfigure_classifier(self):
""" Method reconfigures the classifier depending on the settings of model cfg.data_desc """
self.classifier = SequenceTokenClassifier(
hidden_size=self.bert_model.config.hidden_size,
num_intents=len(self.cfg.data_desc.intent_labels),
num_slots=len(self.cfg.data_desc.slot_labels),
dropout=self.cfg.head.fc_dropout,
num_layers=self.cfg.head.num_output_layers,
log_softmax=False,
)
# define losses
if self.cfg.class_balancing == 'weighted_loss':
# You may need to increase the number of epochs for convergence when using weighted_loss
self.intent_loss = CrossEntropyLoss(logits_ndim=2, weight=self.cfg.data_desc.intent_weights)
self.slot_loss = CrossEntropyLoss(logits_ndim=3, weight=self.cfg.data_desc.slot_weights)
else:
self.intent_loss = CrossEntropyLoss(logits_ndim=2)
self.slot_loss = CrossEntropyLoss(logits_ndim=3)
self.total_loss = AggregatorLoss(
num_inputs=2, weights=[self.cfg.intent_loss_weight, 1.0 - self.cfg.intent_loss_weight]
)
# setup to track metrics
self.intent_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.intent_labels),
label_ids=self.cfg.data_desc.intent_label_ids,
dist_sync_on_step=True,
mode='micro',
)
self.slot_classification_report = ClassificationReport(
num_classes=len(self.cfg.data_desc.slot_labels),
label_ids=self.cfg.data_desc.slot_label_ids,
dist_sync_on_step=True,
mode='micro',
)
def update_data_dir_for_training(self, data_dir: str, train_ds, validation_ds) -> None:
"""
Update data directory and get data stats with Data Descriptor.
Also, reconfigures the classifier - to cope with data with e.g. different number of slots.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
# Update configuration with new data.
self._set_data_desc_to_cfg(self.cfg, data_dir, train_ds, validation_ds)
# Reconfigure the classifier for different settings (number of intents, slots etc.).
self._reconfigure_classifier()
def update_data_dir_for_testing(self, data_dir) -> None:
"""
Update data directory.
Args:
data_dir: path to data directory
"""
logging.info(f'Setting data_dir to {data_dir}.')
self.data_dir = data_dir
@typecheck()
def forward(self, input_ids, token_type_ids, attention_mask):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
intent_logits, slot_logits = self.classifier(hidden_states=hidden_states)
return intent_logits, slot_logits
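    # Shape note (an assumption based on the classifier head above, not NeMo documentation):
    # intent_logits is [batch, num_intents] and slot_logits is [batch, seq_len, num_slots],
    # matching the SequenceTokenClassifier configured in _reconfigure_classifier().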
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
train_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', train_loss)
self.log('lr', lr, prog_bar=True)
return {
'loss': train_loss,
'lr': lr,
}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask, intent_labels, slot_labels = batch
intent_logits, slot_logits = self(
input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
)
# calculate combined loss for intents and slots
intent_loss = self.intent_loss(logits=intent_logits, labels=intent_labels)
slot_loss = self.slot_loss(logits=slot_logits, labels=slot_labels, loss_mask=loss_mask)
val_loss = self.total_loss(loss_1=intent_loss, loss_2=slot_loss)
# calculate accuracy metrics for intents and slot reporting
# intents
preds = torch.argmax(intent_logits, axis=-1)
self.intent_classification_report.update(preds, intent_labels)
# slots
subtokens_mask = subtokens_mask > 0.5
preds = torch.argmax(slot_logits, axis=-1)[subtokens_mask]
slot_labels = slot_labels[subtokens_mask]
self.slot_classification_report.update(preds, slot_labels)
return {
'val_loss': val_loss,
'intent_tp': self.intent_classification_report.tp,
'intent_fn': self.intent_classification_report.fn,
'intent_fp': self.intent_classification_report.fp,
'slot_tp': self.slot_classification_report.tp,
'slot_fn': self.slot_classification_report.fn,
'slot_fp': self.slot_classification_report.fp,
}
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
# calculate metrics and log classification report (separately for intents and slots)
intent_precision, intent_recall, intent_f1, intent_report = self.intent_classification_report.compute()
logging.info(f'Intent report: {intent_report}')
slot_precision, slot_recall, slot_f1, slot_report = self.slot_classification_report.compute()
logging.info(f'Slot report: {slot_report}')
self.log('val_loss', avg_loss)
self.log('intent_precision', intent_precision)
self.log('intent_recall', intent_recall)
self.log('intent_f1', intent_f1)
self.log('slot_precision', slot_precision)
self.log('slot_recall', slot_recall)
self.log('slot_f1', slot_f1)
self.intent_classification_report.reset()
self.slot_classification_report.reset()
return {
'val_loss': avg_loss,
'intent_precision': intent_precision,
'intent_recall': intent_recall,
'intent_f1': intent_f1,
'slot_precision': slot_precision,
'slot_recall': slot_recall,
'slot_f1': slot_f1,
}
def test_step(self, batch, batch_idx):
"""
Lightning calls this inside the test loop with the data from the test dataloader
passed in as `batch`.
"""
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
"""
Called at the end of test to aggregate outputs.
:param outputs: list of individual outputs of each test step.
"""
return self.validation_epoch_end(outputs)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config)
def _setup_dataloader_from_config(self, cfg: DictConfig):
input_file = f'{self.data_dir}/{cfg.prefix}.tsv'
slot_file = f'{self.data_dir}/{cfg.prefix}_slots.tsv'
if not (os.path.exists(input_file) and os.path.exists(slot_file)):
raise FileNotFoundError(
f'{input_file} or {slot_file} not found. Please refer to the documentation for the right format \
of Intents and Slots files.'
)
dataset = IntentSlotClassificationDataset(
input_file=input_file,
slot_file=slot_file,
tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
num_samples=cfg.num_samples,
pad_label=self.cfg.data_desc.pad_label,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
return DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
collate_fn=dataset.collate_fn,
)
def _setup_infer_dataloader(self, queries: List[str], test_ds) -> 'torch.utils.data.DataLoader':
"""
        Setup function for an inference data loader.
Args:
queries: text
batch_size: batch size to use during inference
Returns:
A pytorch DataLoader.
"""
dataset = IntentSlotInferenceDataset(
tokenizer=self.tokenizer, queries=queries, max_seq_length=-1, do_lower_case=False
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=test_ds.batch_size,
shuffle=test_ds.shuffle,
num_workers=test_ds.num_workers,
pin_memory=test_ds.pin_memory,
drop_last=test_ds.drop_last,
)
def predict_from_examples(self, queries: List[str], test_ds) -> List[List[str]]:
"""
Get prediction for the queries (intent and slots)
Args:
queries: text sequences
test_ds: Dataset configuration section.
Returns:
predicted_intents, predicted_slots: model intent and slot predictions
"""
predicted_intents = []
predicted_slots = []
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Retrieve intent and slot vocabularies from configuration.
intent_labels = self.cfg.data_desc.intent_labels
slot_labels = self.cfg.data_desc.slot_labels
# Initialize tokenizer.
# if not hasattr(self, "tokenizer"):
# self._setup_tokenizer(self.cfg.tokenizer)
# Initialize modules.
# self._reconfigure_classifier()
# Switch model to evaluation mode
self.eval()
self.to(device)
# Dataset.
infer_datalayer = self._setup_infer_dataloader(queries, test_ds)
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, loss_mask, subtokens_mask = batch
intent_logits, slot_logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
# predict intents and slots for these examples
# intents
intent_preds = tensor2list(torch.argmax(intent_logits, axis=-1))
# convert numerical outputs to Intent and Slot labels from the dictionaries
for intent_num in intent_preds:
if intent_num < len(intent_labels):
predicted_intents.append(intent_labels[int(intent_num)])
else:
# should not happen
predicted_intents.append("Unknown Intent")
# slots
slot_preds = torch.argmax(slot_logits, axis=-1)
for slot_preds_query, mask_query in zip(slot_preds, subtokens_mask):
query_slots = ''
for slot, mask in zip(slot_preds_query, mask_query):
if mask == 1:
if slot < len(slot_labels):
query_slots += slot_labels[int(slot)] + ' '
else:
query_slots += 'Unknown_slot '
predicted_slots.append(query_slots.strip())
finally:
# set mode back to its original value
self.train(mode=mode)
return predicted_intents, predicted_slots
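    # Illustrative call pattern (the query strings and config path are assumptions):
    #
    #     queries = ["set an alarm for 7 am", "play some jazz"]
    #     intents, slots = model.predict_from_examples(queries, cfg.model.test_ds)
    #     # intents -> one label per query; slots -> space-separated slot tags per sub-token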
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="Joint_Intent_Slot_Assistant",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemonlpmodels/versions/1.0.0a5/files/Joint_Intent_Slot_Assistant.nemo",
description="This models is trained on this https://github.com/xliuhw/NLU-Evaluation-Data dataset which includes 64 various intents and 55 slots. Final Intent accuracy is about 87%, Slot accuracy is about 89%.",
)
result.append(model)
return result
| 42.006148 | 223 | 0.655495 |
4a230f32efe95095bb6de392b4d6fb34fff5f4f6 | 1,518 | py | Python | src/kvt/losses/arcface.py | Ynakatsuka/birdclef-2021 | d7cf7b39e3164a75547ee50cc9a29bd5ed4c29bd | [
"BSD-2-Clause"
] | 6 | 2021-06-02T01:40:27.000Z | 2022-03-04T05:00:52.000Z | src/kvt/losses/arcface.py | Ynakatsuka/birdclef-2021 | d7cf7b39e3164a75547ee50cc9a29bd5ed4c29bd | [
"BSD-2-Clause"
] | null | null | null | src/kvt/losses/arcface.py | Ynakatsuka/birdclef-2021 | d7cf7b39e3164a75547ee50cc9a29bd5ed4c29bd | [
"BSD-2-Clause"
] | null | null | null | import math
import torch
import torch.nn.functional as F
from torch import nn
class DenseCrossEntropy(nn.Module):
def forward(self, x, target, reduction="mean"):
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
loss = -logprobs * target
loss = loss.sum(-1)
if reduction == "mean":
return loss.mean()
elif reduction == "sum":
return loss.sum()
elif reduction == "none":
return loss
class ArcFaceLoss(nn.modules.Module):
def __init__(self, num_classes, s=30.0, m=0.5, reduction="mean"):
super().__init__()
self.reduction = reduction
self.s = s
self.cos_m = math.cos(m) # 0.87758
self.sin_m = math.sin(m) # 0.47943
self.th = math.cos(math.pi - m) # -0.87758
self.mm = math.sin(math.pi - m) * m # 0.23971
self.num_classes = num_classes
def forward(self, logits, labels):
labels = F.one_hot(labels, self.num_classes)
logits = logits.float() # float16 to float32 (if used float16)
cosine = logits
sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) # equals to **2
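        # phi = cos(theta + m) via the angle-addition identity: cos(theta)cos(m) - sin(theta)sin(m)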
phi = cosine * self.cos_m - sine * self.sin_m
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
output = (labels * phi) + ((1.0 - labels) * cosine)
output *= self.s
loss = DenseCrossEntropy()(output, labels, self.reduction)
return loss / 2
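# Illustrative usage sketch (the batch size, class count and cosine head are
# assumptions, not part of this module):
#
#     criterion = ArcFaceLoss(num_classes=1000, s=30.0, m=0.5)
#     logits = cosine_head(embeddings)      # hypothetical head producing cosine similarities, (B, 1000)
#     loss = criterion(logits, labels)      # labels: LongTensor of shape (B,)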
| 32.297872 | 71 | 0.577734 |
4a230fd6b4eab0e7a90e7a3c3d460a727645311d | 449 | py | Python | pproxy/__doc__.py | NellyD3v/python-proxy | 7fccf8dd62204f34b0aa3a70fc568fd6ddff7728 | [
"MIT"
] | 1 | 2021-01-12T04:29:45.000Z | 2021-01-12T04:29:45.000Z | pproxy/__doc__.py | NellyD3v/python-proxy | 7fccf8dd62204f34b0aa3a70fc568fd6ddff7728 | [
"MIT"
] | null | null | null | pproxy/__doc__.py | NellyD3v/python-proxy | 7fccf8dd62204f34b0aa3a70fc568fd6ddff7728 | [
"MIT"
] | null | null | null | __title__ = "pproxy"
__version__ = "2.3.7"
__license__ = "MIT"
__description__ = "Proxy server that can tunnel among remote servers by regex rules."
__keywords__ = "proxy socks http shadowsocks shadowsocksr ssr redirect pf tunnel cipher ssl udp"
__author__ = "Qian Wenjie"
__email__ = "[email protected]"
__url__ = "https://github.com/qwj/python-proxy"
__all__ = ['__version__', '__description__', '__url__']
| 40.818182 | 99 | 0.699332 |
4a231001413bec79943339bbb43fe7e8613cfde6 | 1,395 | py | Python | kitkopt/utilities.py | jgolebiowski/kitkopt | 0b46d38004b75799dd1e8603a445b1d711c03735 | [
"MIT"
] | 6 | 2018-11-19T09:49:19.000Z | 2020-01-16T00:21:14.000Z | kitkopt/utilities.py | jgolebiowski/kitkopt | 0b46d38004b75799dd1e8603a445b1d711c03735 | [
"MIT"
] | null | null | null | kitkopt/utilities.py | jgolebiowski/kitkopt | 0b46d38004b75799dd1e8603a445b1d711c03735 | [
"MIT"
] | 1 | 2019-02-20T19:57:47.000Z | 2019-02-20T19:57:47.000Z | import functools
import sys
import time
def debugtool(some_function):
"""
Wrapper that launches a post mortem pdb debugger on errors in the function
"""
@functools.wraps(some_function)
def wrapper(*args, **kwargs):
try:
return some_function(*args, **kwargs)
except:
import pdb
type, value, traceback = sys.exc_info()
print(type, value, traceback)
pdb.post_mortem(traceback)
return wrapper
def profile(some_function):
"""
Wrapper that profiles the time spent in a function
"""
@functools.wraps(some_function)
def wrapper(*args, **kwargs):
started_at = time.time()
        result = some_function(*args, **kwargs)
        print("Function {} took {:.4e}s".format(some_function.__name__, time.time() - started_at))
        return result
    return wrapper
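# Illustrative usage sketch (the decorated function is hypothetical):
#
#     @profile
#     @debugtool
#     def heavy_step(x):
#         return x * x
#
#     heavy_step(3)   # prints the elapsed time; drops into pdb on an exception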
'''
#--- When you use a decorator, you're replacing one function with another.
#--- In other words, if you have a decorator
def logged(func):
def with_logging(*args, **kwargs):
print(func.__name__ + " was called")
return func(*args, **kwargs)
return with_logging
#--- then when you say
@logged
def f(x):
"""does some math"""
return x + x * x
#---- it's exactly the same as saying
def f(x):
"""does some math"""
return x + x * x
f = logged(f)
'''
class OptimizerError(RuntimeError):
pass | 21.461538 | 98 | 0.618638 |
4a2310f1cf9c1e85c3f716308663aeb41eaf447c | 47,692 | py | Python | tests/sentry/event_manager/test_event_manager.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/event_manager/test_event_manager.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/event_manager/test_event_manager.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import logging
from sentry.utils.compat import mock
import pytest
import uuid
from collections import namedtuple
from datetime import datetime, timedelta
from django.utils import timezone
from time import time
from sentry import nodestore
from sentry.app import tsdb
from sentry.constants import MAX_VERSION_LENGTH
from sentry.eventstore.models import Event
from sentry.event_manager import HashDiscarded, EventManager, EventUser
from sentry.grouping.utils import hash_from_values
from sentry.models import (
Activity,
Environment,
ExternalIssue,
Group,
GroupEnvironment,
GroupHash,
GroupLink,
GroupRelease,
GroupResolution,
GroupStatus,
GroupTombstone,
Integration,
Release,
ReleaseProjectEnvironment,
OrganizationIntegration,
UserReport,
)
from sentry.utils.outcomes import Outcome
from sentry.testutils import assert_mock_called_once_with_partial, TestCase
from sentry.utils.data_filters import FilterStatKeys
from sentry.relay.config import get_project_config
def make_event(**kwargs):
result = {
"event_id": uuid.uuid1().hex,
"message": "foo",
"level": logging.ERROR,
"logger": "default",
"tags": [],
}
result.update(kwargs)
return result
class EventManagerTest(TestCase):
def make_release_event(self, release_name, project_id):
manager = EventManager(make_event(release=release_name))
manager.normalize()
event = manager.save(project_id)
return event
def test_similar_message_prefix_doesnt_group(self):
# we had a regression which caused the default hash to just be
# 'event.message' instead of '[event.message]' which caused it to
# generate a hash per letter
manager = EventManager(make_event(event_id="a", message="foo bar"))
manager.normalize()
event1 = manager.save(1)
manager = EventManager(make_event(event_id="b", message="foo baz"))
manager.normalize()
event2 = manager.save(1)
assert event1.group_id != event2.group_id
def test_ephemeral_interfaces_removed_on_save(self):
manager = EventManager(make_event(platform="python"))
manager.normalize()
event = manager.save(1)
group = event.group
assert group.platform == "python"
assert event.platform == "python"
@mock.patch("sentry.event_manager.eventstream.insert")
def test_dupe_message_id(self, eventstream_insert):
# Saves the latest event to nodestore and eventstream
project_id = 1
event_id = "a" * 32
node_id = Event.generate_node_id(project_id, event_id)
manager = EventManager(make_event(event_id=event_id, message="first"))
manager.normalize()
manager.save(project_id)
assert nodestore.get(node_id)["logentry"]["formatted"] == "first"
manager = EventManager(make_event(event_id=event_id, message="second"))
manager.normalize()
manager.save(project_id)
assert nodestore.get(node_id)["logentry"]["formatted"] == "second"
assert eventstream_insert.call_count == 2
def test_updates_group(self):
timestamp = time() - 300
manager = EventManager(
make_event(message="foo", event_id="a" * 32, checksum="a" * 32, timestamp=timestamp)
)
manager.normalize()
event = manager.save(1)
manager = EventManager(
make_event(
message="foo bar", event_id="b" * 32, checksum="a" * 32, timestamp=timestamp + 2.0
)
)
manager.normalize()
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen == event2.datetime
assert group.message == event2.message
assert group.data.get("type") == "default"
assert group.data.get("metadata") == {"title": "foo bar"}
def test_updates_group_with_fingerprint(self):
ts = time() - 200
manager = EventManager(
make_event(message="foo", event_id="a" * 32, fingerprint=["a" * 32], timestamp=ts)
)
with self.tasks():
event = manager.save(1)
manager = EventManager(
make_event(message="foo bar", event_id="b" * 32, fingerprint=["a" * 32], timestamp=ts)
)
with self.tasks():
event2 = manager.save(1)
group = Group.objects.get(id=event.group_id)
assert group.times_seen == 2
assert group.last_seen == event.datetime
assert group.message == event2.message
def test_differentiates_with_fingerprint(self):
manager = EventManager(
make_event(message="foo", event_id="a" * 32, fingerprint=["{{ default }}", "a" * 32])
)
with self.tasks():
manager.normalize()
event = manager.save(1)
manager = EventManager(
make_event(message="foo bar", event_id="b" * 32, fingerprint=["a" * 32])
)
with self.tasks():
manager.normalize()
event2 = manager.save(1)
assert event.group_id != event2.group_id
def test_unresolves_group(self):
ts = time() - 300
        # N.B. EventManager won't unresolve the group unless event2 has a
        # later timestamp than event1.
manager = EventManager(make_event(event_id="a" * 32, checksum="a" * 32, timestamp=ts))
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(make_event(event_id="b" * 32, checksum="a" * 32, timestamp=ts + 50))
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert not group.is_resolved()
@mock.patch("sentry.event_manager.plugin_is_regression")
def test_does_not_unresolve_group(self, plugin_is_regression):
        # N.B. EventManager won't unresolve the group unless event2 has a
        # later timestamp than event1.
plugin_is_regression.return_value = False
manager = EventManager(
make_event(event_id="a" * 32, checksum="a" * 32, timestamp=1403007314)
)
with self.tasks():
manager.normalize()
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
group.status = GroupStatus.RESOLVED
group.save()
assert group.is_resolved()
manager = EventManager(
make_event(event_id="b" * 32, checksum="a" * 32, timestamp=1403007315)
)
manager.normalize()
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=group.id)
assert group.is_resolved()
@mock.patch("sentry.tasks.activity.send_activity_notifications.delay")
@mock.patch("sentry.event_manager.plugin_is_regression")
def test_marks_as_unresolved_with_new_release(
self, plugin_is_regression, mock_send_activity_notifications_delay
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version="a",
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id="a" * 32,
checksum="a" * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(release=old_release, group=group)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={"version": ""},
)
manager = EventManager(
make_event(
event_id="b" * 32, checksum="a" * 32, timestamp=time(), release=old_release.version
)
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data["version"] == ""
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(event_id="c" * 32, checksum="a" * 32, timestamp=time(), release="b")
)
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data["version"] == "b"
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(group=group, type=Activity.SET_REGRESSION)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch("sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound")
@mock.patch("sentry.tasks.activity.send_activity_notifications.delay")
@mock.patch("sentry.event_manager.plugin_is_regression")
def test_marks_as_unresolved_with_new_release_with_integration(
self,
plugin_is_regression,
mock_send_activity_notifications_delay,
mock_sync_status_outbound,
):
plugin_is_regression.return_value = True
old_release = Release.objects.create(
version="a",
organization_id=self.project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(self.project)
manager = EventManager(
make_event(
event_id="a" * 32,
checksum="a" * 32,
timestamp=time() - 50000, # need to work around active_at
release=old_release.version,
)
)
event = manager.save(1)
group = event.group
org = group.organization
integration = Integration.objects.create(provider="example", name="Example")
integration.add_organization(org, self.user)
OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=group.organization.id
).update(
config={
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
)
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id, integration_id=integration.id, key="APP-%s" % group.id
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
group.update(status=GroupStatus.RESOLVED)
resolution = GroupResolution.objects.create(release=old_release, group=group)
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
data={"version": ""},
)
manager = EventManager(
make_event(
event_id="b" * 32, checksum="a" * 32, timestamp=time(), release=old_release.version
)
)
with self.tasks():
with self.feature({"organizations:integrations-issue-sync": True}):
event = manager.save(1)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data["version"] == ""
assert GroupResolution.objects.filter(group=group).exists()
manager = EventManager(
make_event(event_id="c" * 32, checksum="a" * 32, timestamp=time(), release="b")
)
event = manager.save(1)
mock_sync_status_outbound.assert_called_once_with(
external_issue, False, event.group.project_id
)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
activity = Activity.objects.get(id=activity.id)
assert activity.data["version"] == "b"
assert not GroupResolution.objects.filter(group=group).exists()
activity = Activity.objects.get(group=group, type=Activity.SET_REGRESSION)
mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
@mock.patch("sentry.tasks.activity.send_activity_notifications.delay")
@mock.patch("sentry.event_manager.plugin_is_regression")
def test_does_not_mark_as_unresolved_with_pending_commit(
self, plugin_is_regression, mock_send_activity_notifications_delay
):
plugin_is_regression.return_value = True
repo = self.create_repo(project=self.project)
commit = self.create_commit(repo=repo)
manager = EventManager(
make_event(
event_id="a" * 32,
checksum="a" * 32,
timestamp=time() - 50000, # need to work around active_at
)
)
event = manager.save(self.project.id)
group = event.group
group.update(status=GroupStatus.RESOLVED)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_id=commit.id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
)
manager = EventManager(make_event(event_id="b" * 32, checksum="a" * 32, timestamp=time()))
event = manager.save(self.project.id)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.RESOLVED
@mock.patch("sentry.tasks.activity.send_activity_notifications.delay")
@mock.patch("sentry.event_manager.plugin_is_regression")
def test_mark_as_unresolved_with_released_commit(
self, plugin_is_regression, mock_send_activity_notifications_delay
):
plugin_is_regression.return_value = True
release = self.create_release(project=self.project)
repo = self.create_repo(project=self.project)
commit = self.create_commit(repo=repo, release=release, project=self.project)
manager = EventManager(
make_event(
event_id="a" * 32,
checksum="a" * 32,
timestamp=time() - 50000, # need to work around active_at
)
)
event = manager.save(self.project.id)
group = event.group
group.update(status=GroupStatus.RESOLVED)
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_id=commit.id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
)
manager = EventManager(make_event(event_id="b" * 32, checksum="a" * 32, timestamp=time()))
event = manager.save(self.project.id)
assert event.group_id == group.id
group = Group.objects.get(id=group.id)
assert group.status == GroupStatus.UNRESOLVED
@mock.patch("sentry.models.Group.is_resolved")
def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
ts = time() - 100
mock_is_resolved.return_value = False
manager = EventManager(make_event(event_id="a" * 32, checksum="a" * 32, timestamp=ts))
with self.tasks():
event = manager.save(1)
mock_is_resolved.return_value = True
manager = EventManager(make_event(event_id="b" * 32, checksum="a" * 32, timestamp=ts + 100))
with self.tasks():
event2 = manager.save(1)
assert event.group_id == event2.group_id
group = Group.objects.get(id=event.group.id)
assert group.active_at.replace(second=0) == event2.datetime.replace(second=0)
assert group.active_at.replace(second=0) != event.datetime.replace(second=0)
def test_invalid_transaction(self):
dict_input = {"messages": "foo"}
manager = EventManager(make_event(transaction=dict_input))
manager.normalize()
event = manager.save(1)
assert event.transaction is None
def test_transaction_as_culprit(self):
manager = EventManager(make_event(transaction="foobar"))
manager.normalize()
event = manager.save(1)
assert event.transaction == "foobar"
assert event.culprit == "foobar"
def test_culprit_is_not_transaction(self):
manager = EventManager(make_event(culprit="foobar"))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction is None
assert event1.culprit == "foobar"
def test_inferred_culprit_from_empty_stacktrace(self):
manager = EventManager(make_event(stacktrace={"frames": []}))
manager.normalize()
event = manager.save(1)
assert event.culprit == ""
def test_transaction_and_culprit(self):
manager = EventManager(make_event(transaction="foobar", culprit="baz"))
manager.normalize()
event1 = manager.save(1)
assert event1.transaction == "foobar"
assert event1.culprit == "baz"
def test_first_release(self):
project_id = 1
event = self.make_release_event("1.0", project_id)
group = event.group
assert group.first_release.version == "1.0"
event = self.make_release_event("2.0", project_id)
group = event.group
assert group.first_release.version == "1.0"
def test_release_project_slug(self):
project = self.create_project(name="foo")
release = Release.objects.create(version="foo-1.0", organization=project.organization)
release.add_project(project)
event = self.make_release_event("1.0", project.id)
group = event.group
assert group.first_release.version == "foo-1.0"
release_tag = [v for k, v in event.tags if k == "sentry:release"][0]
assert release_tag == "foo-1.0"
event = self.make_release_event("2.0", project.id)
group = event.group
assert group.first_release.version == "foo-1.0"
def test_release_project_slug_long(self):
project = self.create_project(name="foo")
partial_version_len = MAX_VERSION_LENGTH - 4
release = Release.objects.create(
version="foo-%s" % ("a" * partial_version_len,), organization=project.organization
)
release.add_project(project)
event = self.make_release_event("a" * partial_version_len, project.id)
group = event.group
assert group.first_release.version == "foo-%s" % ("a" * partial_version_len,)
release_tag = [v for k, v in event.tags if k == "sentry:release"][0]
assert release_tag == "foo-%s" % ("a" * partial_version_len,)
def test_group_release_no_env(self):
project_id = 1
event = self.make_release_event("1.0", project_id)
release = Release.objects.get(version="1.0", projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id, group_id=event.group_id, environment=""
).exists()
# ensure we're not erroring on second creation
event = self.make_release_event("1.0", project_id)
def test_group_release_with_env(self):
manager = EventManager(make_event(release="1.0", environment="prod", event_id="a" * 32))
manager.normalize()
event = manager.save(1)
release = Release.objects.get(version="1.0", projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id, group_id=event.group_id, environment="prod"
).exists()
manager = EventManager(make_event(release="1.0", environment="staging", event_id="b" * 32))
event = manager.save(1)
release = Release.objects.get(version="1.0", projects=event.project_id)
assert GroupRelease.objects.filter(
release_id=release.id, group_id=event.group_id, environment="staging"
).exists()
def test_tsdb(self):
project = self.project
manager = EventManager(
make_event(
fingerprint=["totally unique super duper fingerprint"],
environment="totally unique super duper environment",
)
)
event = manager.save(project.id)
def query(model, key, **kwargs):
return tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)[key]
assert query(tsdb.models.project, project.id) == 1
assert query(tsdb.models.group, event.group.id) == 1
environment_id = Environment.get_for_organization_id(
event.project.organization_id, "totally unique super duper environment"
).id
assert query(tsdb.models.project, project.id, environment_id=environment_id) == 1
assert query(tsdb.models.group, event.group.id, environment_id=environment_id) == 1
@pytest.mark.xfail
def test_record_frequencies(self):
project = self.project
manager = EventManager(make_event())
event = manager.save(project.id)
assert tsdb.get_most_frequent(
tsdb.models.frequent_issues_by_project, (event.project.id,), event.datetime
) == {event.project.id: [(event.group_id, 1.0)]}
def test_event_user(self):
manager = EventManager(
make_event(
event_id="a", environment="totally unique environment", **{"user": {"id": "1"}}
)
)
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
environment_id = Environment.get_for_organization_id(
event.project.organization_id, "totally unique environment"
).id
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group, (event.group.id,), event.datetime, event.datetime
) == {event.group.id: 1}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id,),
event.datetime,
event.datetime,
) == {event.project.id: 1}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_group,
(event.group.id,),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {event.group.id: 1}
assert tsdb.get_distinct_counts_totals(
tsdb.models.users_affected_by_project,
(event.project.id,),
event.datetime,
event.datetime,
environment_id=environment_id,
) == {event.project.id: 1}
euser = EventUser.objects.get(project_id=self.project.id, ident="1")
assert event.get_tag("sentry:user") == euser.tag_value
# ensure event user is mapped to tags in second attempt
manager = EventManager(make_event(event_id="b", **{"user": {"id": "1", "name": "jane"}}))
manager.normalize()
with self.tasks():
event = manager.save(self.project.id)
euser = EventUser.objects.get(id=euser.id)
assert event.get_tag("sentry:user") == euser.tag_value
assert euser.name == "jane"
assert euser.ident == "1"
def test_event_user_invalid_ip(self):
manager = EventManager(
make_event(
event_id="a", environment="totally unique environment", **{"user": {"id": "1"}}
)
)
manager.normalize()
# This can happen as part of PII stripping, which happens after normalization
manager._data["user"]["ip_address"] = "[ip]"
with self.tasks():
manager.save(self.project.id)
euser = EventUser.objects.get(project_id=self.project.id)
assert euser.ip_address is None
def test_event_user_unicode_identifier(self):
manager = EventManager(make_event(**{"user": {"username": u"foô"}}))
manager.normalize()
with self.tasks():
manager.save(self.project.id)
euser = EventUser.objects.get(project_id=self.project.id)
assert euser.username == u"foô"
def test_environment(self):
manager = EventManager(make_event(**{"environment": "beta"}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get("environment") == "beta"
def test_invalid_environment(self):
manager = EventManager(make_event(**{"environment": "bad/name"}))
manager.normalize()
event = manager.save(self.project.id)
assert dict(event.tags).get("environment") is None
def test_invalid_tags(self):
manager = EventManager(make_event(**{"tags": [42]}))
manager.normalize()
assert None in manager.get_data().get("tags", [])
assert 42 not in manager.get_data().get("tags", [])
event = manager.save(self.project.id)
assert 42 not in event.tags
assert None not in event.tags
@mock.patch("sentry.event_manager.eventstream.insert")
def test_group_environment(self, eventstream_insert):
release_version = "1.0"
def save_event():
manager = EventManager(
make_event(
**{
"event_id": uuid.uuid1().hex,
"environment": "beta",
"release": release_version,
}
)
)
manager.normalize()
return manager.save(self.project.id)
event = save_event()
# Ensure the `GroupEnvironment` record was created.
instance = GroupEnvironment.objects.get(
group_id=event.group_id,
environment_id=Environment.objects.get(
organization_id=self.project.organization_id, name=event.get_tag("environment")
).id,
)
assert Release.objects.get(id=instance.first_release_id).version == release_version
# Ensure that the first event in the (group, environment) pair is
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=True,
is_regression=False,
is_new_group_environment=True,
primary_hash="acbd18db4cc2f85cedef654fccc4a4d8",
skip_consume=False,
received_timestamp=event.data["received"],
)
event = save_event()
# Ensure that the next event in the (group, environment) pair is *not*
# marked as being part of a new environment.
eventstream_insert.assert_called_with(
group=event.group,
event=event,
is_new=False,
is_regression=None, # XXX: wut
is_new_group_environment=False,
primary_hash="acbd18db4cc2f85cedef654fccc4a4d8",
skip_consume=False,
received_timestamp=event.data["received"],
)
def test_default_fingerprint(self):
manager = EventManager(make_event())
manager.normalize()
event = manager.save(self.project.id)
assert event.data.get("fingerprint") == ["{{ default }}"]
def test_user_report_gets_environment(self):
project = self.create_project()
environment = Environment.objects.create(
project_id=project.id, organization_id=project.organization_id, name="production"
)
environment.add_project(project)
event_id = "a" * 32
UserReport.objects.create(
project=project,
event_id=event_id,
name="foo",
email="[email protected]",
comments="It Broke!!!",
)
self.store_event(
data=make_event(environment=environment.name, event_id=event_id), project_id=project.id
)
assert UserReport.objects.get(event_id=event_id).environment == environment
def test_default_event_type(self):
manager = EventManager(make_event(message="foo bar"))
manager.normalize()
data = manager.get_data()
assert data["type"] == "default"
event = manager.save(self.project.id)
group = event.group
assert group.data.get("type") == "default"
assert group.data.get("metadata") == {"title": "foo bar"}
def test_message_event_type(self):
manager = EventManager(
make_event(
**{
"message": "",
"logentry": {"formatted": "foo bar", "message": "foo %s", "params": ["bar"]},
}
)
)
manager.normalize()
data = manager.get_data()
assert data["type"] == "default"
event = manager.save(self.project.id)
group = event.group
assert group.data.get("type") == "default"
assert group.data.get("metadata") == {"title": "foo bar"}
def test_error_event_type(self):
manager = EventManager(
make_event(**{"exception": {"values": [{"type": "Foo", "value": "bar"}]}})
)
manager.normalize()
data = manager.get_data()
assert data["type"] == "error"
event = manager.save(self.project.id)
group = event.group
assert group.data.get("type") == "error"
assert group.data.get("metadata") == {"type": "Foo", "value": "bar"}
def test_csp_event_type(self):
manager = EventManager(
make_event(
**{
"csp": {
"effective_directive": "script-src",
"blocked_uri": "http://example.com",
}
}
)
)
manager.normalize()
data = manager.get_data()
assert data["type"] == "csp"
event = manager.save(self.project.id)
group = event.group
assert group.data.get("type") == "csp"
assert group.data.get("metadata") == {
"directive": "script-src",
"uri": "example.com",
"message": "Blocked 'script' from 'example.com'",
}
def test_transaction_event_type(self):
manager = EventManager(
make_event(
**{
"transaction": "wait",
"contexts": {
"trace": {
"parent_span_id": "bce14471e0e9654d",
"op": "foobar",
"trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
"span_id": "bf5be759039ede9a",
}
},
"spans": [],
"timestamp": "2019-06-14T14:01:40Z",
"start_timestamp": "2019-06-14T14:01:40Z",
"type": "transaction",
}
)
)
manager.normalize()
data = manager.get_data()
assert data["type"] == "transaction"
def test_sdk(self):
manager = EventManager(make_event(**{"sdk": {"name": "sentry-unity", "version": "1.0"}}))
manager.normalize()
event = manager.save(self.project.id)
assert event.data["sdk"] == {
"name": "sentry-unity",
"version": "1.0",
"integrations": None,
"packages": None,
}
def test_no_message(self):
# test that the message is handled gracefully
manager = EventManager(
make_event(**{"message": None, "logentry": {"message": "hello world"}})
)
manager.normalize()
event = manager.save(self.project.id)
assert event.message == "hello world"
def test_search_message(self):
manager = EventManager(
make_event(
**{
"message": "test",
"logentry": {"message": "hello world"},
"transaction": "sentry.tasks.process",
}
)
)
manager.normalize()
event = manager.save(self.project.id)
assert event.search_message == "hello world sentry.tasks.process"
def test_stringified_message(self):
manager = EventManager(make_event(**{"message": 1234}))
manager.normalize()
event = manager.save(self.project.id)
assert event.data["logentry"] == {"formatted": "1234", "message": None, "params": None}
def test_bad_message(self):
# test that invalid messages are rejected
manager = EventManager(make_event(**{"message": ["asdf"]}))
manager.normalize()
event = manager.save(self.project.id)
assert event.message == '["asdf"]'
assert "logentry" in event.data
def test_message_attribute_goes_to_interface(self):
manager = EventManager(make_event(**{"message": "hello world"}))
manager.normalize()
event = manager.save(self.project.id)
assert event.data["logentry"] == {
"formatted": "hello world",
"message": None,
"params": None,
}
def test_message_attribute_shadowing(self):
# Logentry shadows the legacy message attribute.
manager = EventManager(
make_event(**{"message": "world hello", "logentry": {"message": "hello world"}})
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data["logentry"] == {
"formatted": "hello world",
"message": None,
"params": None,
}
def test_message_attribute_interface_both_strings(self):
manager = EventManager(
make_event(**{"logentry": "a plain string", "message": "another string"})
)
manager.normalize()
event = manager.save(self.project.id)
assert event.data["logentry"] == {
"formatted": "a plain string",
"message": None,
"params": None,
}
def test_throws_when_matches_discarded_hash(self):
manager = EventManager(make_event(message="foo", event_id="a" * 32, fingerprint=["a" * 32]))
with self.tasks():
event = manager.save(1)
group = Group.objects.get(id=event.group_id)
tombstone = GroupTombstone.objects.create(
project_id=group.project_id,
level=group.level,
message=group.message,
culprit=group.culprit,
data=group.data,
previous_group_id=group.id,
)
GroupHash.objects.filter(group=group).update(group=None, group_tombstone_id=tombstone.id)
manager = EventManager(make_event(message="foo", event_id="b" * 32, fingerprint=["a" * 32]))
from sentry.utils.outcomes import track_outcome
mock_track_outcome = mock.Mock(wraps=track_outcome)
with mock.patch("sentry.event_manager.track_outcome", mock_track_outcome):
with self.tasks():
with self.assertRaises(HashDiscarded):
event = manager.save(1)
assert_mock_called_once_with_partial(
mock_track_outcome, outcome=Outcome.FILTERED, reason=FilterStatKeys.DISCARDED_HASH
)
def query(model, key, **kwargs):
return tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)[key]
# Ensure that we incremented TSDB counts
assert query(tsdb.models.organization_total_received, event.project.organization.id) == 2
assert query(tsdb.models.project_total_received, event.project.id) == 2
assert query(tsdb.models.project, event.project.id) == 1
assert query(tsdb.models.group, event.group.id) == 1
assert query(tsdb.models.organization_total_blacklisted, event.project.organization.id) == 1
assert query(tsdb.models.project_total_blacklisted, event.project.id) == 1
def test_event_accepted_outcome(self):
manager = EventManager(make_event(message="foo"))
manager.normalize()
mock_track_outcome = mock.Mock()
with mock.patch("sentry.event_manager.track_outcome", mock_track_outcome):
manager.save(1)
assert_mock_called_once_with_partial(mock_track_outcome, outcome=Outcome.ACCEPTED)
def test_checksum_rehashed(self):
checksum = "invalid checksum hash"
manager = EventManager(make_event(**{"checksum": checksum}))
manager.normalize()
event = manager.save(self.project.id)
hashes = [gh.hash for gh in GroupHash.objects.filter(group=event.group)]
assert sorted(hashes) == sorted([hash_from_values(checksum), checksum])
@mock.patch("sentry.event_manager.is_valid_error_message")
def test_should_filter_message(self, mock_is_valid_error_message):
TestItem = namedtuple("TestItem", "value formatted result")
items = [
TestItem({"type": "UnfilteredException"}, "UnfilteredException", True),
TestItem(
{"value": "This is an unfiltered exception."},
"This is an unfiltered exception.",
True,
),
TestItem(
{"type": "UnfilteredException", "value": "This is an unfiltered exception."},
"UnfilteredException: This is an unfiltered exception.",
True,
),
TestItem(
{"type": "FilteredException", "value": "This is a filtered exception."},
"FilteredException: This is a filtered exception.",
False,
),
]
data = {"exception": {"values": [item.value for item in items]}}
project_config = get_project_config(self.project)
manager = EventManager(data, project=self.project, project_config=project_config)
mock_is_valid_error_message.side_effect = [item.result for item in items]
assert manager.should_filter() == (True, FilterStatKeys.ERROR_MESSAGE)
assert mock_is_valid_error_message.call_args_list == [
mock.call(project_config, item.formatted) for item in items
]
def test_legacy_attributes_moved(self):
event = make_event(
release="my-release",
environment="my-environment",
site="whatever",
server_name="foo.com",
event_id=uuid.uuid1().hex,
)
manager = EventManager(event)
event = manager.save(1)
# release and environment stay toplevel
assert event.data["release"] == "my-release"
assert event.data["environment"] == "my-environment"
# site is a legacy attribute that is just a tag
assert event.data.get("site") is None
tags = dict(event.tags)
assert tags["site"] == "whatever"
assert event.data.get("server_name") is None
tags = dict(event.tags)
assert tags["server_name"] == "foo.com"
def test_save_issueless_event(self):
manager = EventManager(
make_event(
transaction="wait",
contexts={
"trace": {
"parent_span_id": "bce14471e0e9654d",
"op": "foobar",
"trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
"span_id": "bf5be759039ede9a",
}
},
spans=[],
timestamp="2019-06-14T14:01:40Z",
start_timestamp="2019-06-14T14:01:40Z",
type="transaction",
platform="python",
)
)
event = manager.save(self.project.id)
assert event.group is None
assert (
tsdb.get_sums(tsdb.models.project, [self.project.id], event.datetime, event.datetime)[
self.project.id
]
== 1
)
def test_fingerprint_ignored(self):
manager1 = EventManager(make_event(event_id="a" * 32, fingerprint="fingerprint1"))
event1 = manager1.save(self.project.id)
manager2 = EventManager(
make_event(
event_id="b" * 32,
fingerprint="fingerprint1",
transaction="wait",
contexts={
"trace": {
"parent_span_id": "bce14471e0e9654d",
"op": "foobar",
"trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
"span_id": "bf5be759039ede9a",
}
},
spans=[],
timestamp="2019-06-14T14:01:40Z",
start_timestamp="2019-06-14T14:01:40Z",
type="transaction",
platform="python",
)
)
event2 = manager2.save(self.project.id)
assert event1.group is not None
assert event2.group is None
assert (
tsdb.get_sums(tsdb.models.project, [self.project.id], event1.datetime, event1.datetime)[
self.project.id
]
== 2
)
assert (
tsdb.get_sums(tsdb.models.group, [event1.group.id], event1.datetime, event1.datetime)[
event1.group.id
]
== 1
)
class ReleaseIssueTest(TestCase):
def setUp(self):
self.project = self.create_project()
self.release = Release.get_or_create(self.project, "1.0")
self.environment1 = Environment.get_or_create(self.project, "prod")
self.environment2 = Environment.get_or_create(self.project, "staging")
self.timestamp = float(int(time() - 300))
def make_event(self, **kwargs):
result = {
"event_id": "a" * 32,
"message": "foo",
"timestamp": self.timestamp + 0.23,
"level": logging.ERROR,
"logger": "default",
"tags": [],
}
result.update(kwargs)
return result
def make_release_event(
self, release_version="1.0", environment_name="prod", project_id=1, **kwargs
):
event = make_event(
release=release_version, environment=environment_name, event_id=uuid.uuid1().hex
)
event.update(kwargs)
manager = EventManager(event)
with self.tasks():
event = manager.save(project_id)
return event
def convert_timestamp(self, timestamp):
date = datetime.fromtimestamp(timestamp)
date = date.replace(tzinfo=timezone.utc)
return date
def assert_release_project_environment(self, event, new_issues_count, first_seen, last_seen):
release = Release.objects.get(
organization=event.project.organization.id, version=event.get_tag("sentry:release")
)
release_project_envs = ReleaseProjectEnvironment.objects.filter(
release=release, project=event.project, environment=event.get_environment()
)
assert len(release_project_envs) == 1
release_project_env = release_project_envs[0]
assert release_project_env.new_issues_count == new_issues_count
assert release_project_env.first_seen == self.convert_timestamp(first_seen)
assert release_project_env.last_seen == self.convert_timestamp(last_seen)
def test_different_groups(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1, new_issues_count=1, last_seen=self.timestamp, first_seen=self.timestamp
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="b" * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=2,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1, new_issues_count=1, last_seen=self.timestamp, first_seen=self.timestamp
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp,
)
def test_same_group_different_environment(self):
event1 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment1.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp,
)
self.assert_release_project_environment(
event=event1, new_issues_count=1, last_seen=self.timestamp, first_seen=self.timestamp
)
event2 = self.make_release_event(
release_version=self.release.version,
environment_name=self.environment2.name,
project_id=self.project.id,
checksum="a" * 32,
timestamp=self.timestamp + 100,
)
self.assert_release_project_environment(
event=event1, new_issues_count=1, last_seen=self.timestamp, first_seen=self.timestamp
)
self.assert_release_project_environment(
event=event2,
new_issues_count=1,
last_seen=self.timestamp + 100,
first_seen=self.timestamp + 100,
)
| 36.048375 | 100 | 0.60096 |
4a23118f9a19b9d5c62130d94d29de602873c349 | 2,544 | py | Python | python/equeenInput.py | Fatizel/EightQueensPuzzle | d8fd190621be9d2e0cbc835df360024cc6e5b8d3 | [
"MIT"
] | null | null | null | python/equeenInput.py | Fatizel/EightQueensPuzzle | d8fd190621be9d2e0cbc835df360024cc6e5b8d3 | [
"MIT"
] | null | null | null | python/equeenInput.py | Fatizel/EightQueensPuzzle | d8fd190621be9d2e0cbc835df360024cc6e5b8d3 | [
"MIT"
] | null | null | null | import datetime
# import the library used for the db connection
from sqlalchemy import create_engine
# Function to check whether any already-placed queen attacks the given square on the board
def verification(row, col, queens):
if not len(queens): return False
for queen in queens:
if not len(queen):
continue
r,c = queen
if r == row: return True # Check row
if c == col: return True # Check column
if (col-c) == (row-r): return True # Check left diagonal
if (col-c) == -(row-r): return True # Check right diagonal
return False
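# Illustration only: with a single queen already placed at (1, 1),
#   verification(2, 2, [(1, 1)]) -> True   (diagonal clash)
#   verification(2, 1, [(1, 1)]) -> True   (same column)
#   verification(2, 4, [(1, 1)]) -> False  (safe square)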
# Function to iterate over the rows, building the solutions up row by row
def research(n):
    numSolution = 0
solutions = None
for row in range(1, n+1):
        # for each row, check all valid columns
solutions, numSolution = inspection(solutions, row, n, numSolution)
return solutions, numSolution
# Function that extends each partial solution with every valid column of the current row
def inspection(solutions, row, n, numSolution):
    # list of candidate solutions built in this iteration
nSolution = []
    # number of solutions added in this iteration
numSolution = 0
for col in range(1, n+1):
if not solutions or not len(solutions):
nSolution.append([] + [(row, col)])
else:
for solution in solutions:
if not verification(row, col, solution):
numSolution = numSolution + 1
nSolution.append(solution + [(row, col)])
return nSolution, numSolution
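# Illustration only: after the last row has been processed, numSolution equals the number
# of complete placements, e.g. research(4) yields the 2 classic 4-queens solutions and
# research(8) yields all 92 solutions of the 8-queens puzzle.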
# Function to connect to the database and (re)create the results table
def dbconect(userN, pw, host, port,tName):
dbString = "postgresql://"+userN+":"+pw+"@"+host+":"+port;
db = create_engine(dbString)
db.execute("DROP TABLE IF EXISTS " + tName )
db.execute("DROP SCHEMA IF EXISTS "+ tName)
db.execute("CREATE TABLE IF NOT EXISTS "+ tName +"(queen text,iter text,dx text,dy text)")
return db
# Function to add one row of data into the table
def dbinsert(db,tName, queen, itera, dx, dy):
db.execute("INSERT INTO "+ tName +" (queen, iter, dx, dy) VALUES ("+queen+", "+itera+", "+dx+","+dy+")")
#reader = db.execute("SELECT * FROM "+ tName)
#for r in reader:
# print(r)
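# A minimal parameterized-query sketch (illustration only, assuming the same SQLAlchemy 1.x
# engine.execute() style used above); "dbinsert_parameterized" is not part of the original
# script. Binding the values avoids building SQL by string concatenation, although the
# table name itself still has to be a trusted identifier.
def dbinsert_parameterized(db, tName, queen, itera, dx, dy):
    from sqlalchemy import text
    db.execute(
        text("INSERT INTO " + tName + " (queen, iter, dx, dy) VALUES (:q, :i, :x, :y)"),
        {"q": queen, "i": itera, "x": dx, "y": dy},
    )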
tName = "queenR"
queen = int(8)
current_time = datetime.datetime.now()
print(current_time)
db = dbconect("postgres","mysecretpassword","localhost","5432", tName)
solution, numSolution = research(queen)
a = solution
iTb = 0
for row in a:
iTb = iTb +1
for elem in row:
dbinsert(db,tName,str(queen),str(iTb),str(elem[0]),str(elem[1]))
current_time = datetime.datetime.now()
print(current_time)
print("Soluciones almacenadas: " + str(numSolution))
| 33.038961 | 108 | 0.639151 |
4a2312c1d6639d0e4c867b3a9c5a815a576e1d32 | 2,671 | py | Python | var/spack/repos/builtin/packages/r-genomicranges/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-11-28T10:14:14.000Z | 2019-11-28T10:14:14.000Z | var/spack/repos/builtin/packages/r-genomicranges/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-genomicranges/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2017-01-21T17:19:32.000Z | 2017-01-21T17:19:32.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGenomicranges(RPackage):
"""Representation and manipulation of genomic intervals.
The ability to efficiently represent and manipulate genomic annotations
and alignments is playing a central role when it comes to analyzing
high-throughput sequencing data (a.k.a. NGS data). The GenomicRanges
package defines general purpose containers for storing and manipulating
genomic intervals and variables defined along a genome. More specialized
containers for representing and manipulating short alignments against a
reference genome, or a matrix-like summarization of an experiment, are
defined in the GenomicAlignments and SummarizedExperiment packages,
respectively. Both packages build on top of the GenomicRanges
infrastructure."""
homepage = "https://bioconductor.org/packages/GenomicRanges"
git = "https://git.bioconductor.org/packages/GenomicRanges.git"
version('1.36.1', commit='418e7e5647dd54d81b804455ddfcbc027fd0164a')
version('1.34.0', commit='ebaad5ca61abb67c2c30c132e07531ba4257bccd')
version('1.32.7', commit='4c56dc836dbfd0d228dc810e8d401811cdbc267c')
version('1.30.3', commit='e99979054bc50ed8c0109bc54563036c1b368997')
version('1.28.6', commit='197472d618f3ed04c795dc6ed435500c29619563')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('[email protected]:', when='@1.30.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.30.3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.7:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.7:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.7:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.7:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.32.7:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.34.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.34.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.36.1:', type=('build', 'run'))
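    # Illustration only (not part of the original recipe): with this file in a Spack
    # package repository, the library is installed with e.g. `spack install r-genomicranges`.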
| 51.365385 | 80 | 0.681018 |
4a2312d1b20262e42760698917b2465f453e499f | 21,540 | py | Python | test/chain_lighting.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | 1 | 2017-10-04T12:21:20.000Z | 2017-10-04T12:21:20.000Z | test/chain_lighting.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | null | null | null | test/chain_lighting.py | nonemaw/YeTi | 92a3ba89f5b7fd8b2d5d3f5929ade0bf0b9e5cbe | [
"MIT"
] | null | null | null | import copy
import inspect
from functools import reduce
from itertools import islice
from reprlib import recursive_repr
from collections import OrderedDict
# Iterator/Iterable/Mapping live in collections.abc on Python 3.3+ (and were removed
# from the collections namespace in Python 3.10)
from collections.abc import Iterator, Iterable, Mapping
class OrderedTable(OrderedDict):
"""
enabling dot operation for dict/OrderedDict
Example:
>>> t = OrderedTable({'key1': 1, 'key2': {'key3': 3, 'key4': 4}})
>>> t.key2.key5 = 5
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = OrderedTable(v) if isinstance(v, dict) else v
if kwargs:
for k, v in kwargs.items():
self[k] = OrderedTable(v) if isinstance(v, dict) else v
@recursive_repr()
def __repr__(self):
if not self:
return 'OrderedTable()'
return f'OrderedTable({list(self.items())})'
def __getattr__(self, item):
"""
enable
>>> t.key
"""
return self.get(item)
def __setattr__(self, key, value):
"""
enable
>>> t.key2 = 2
"""
# convert value to Table type before passing to __setitem__
if isinstance(value, dict):
value = OrderedTable(value)
self.__setitem__(key, value)
def __delattr__(self, item):
"""
enable
>>> del t.key3
"""
self.__delitem__(item)
def __setitem__(self, key, value):
"""
        signature does not exactly match the parent's __setitem__, but it works fine here
"""
super().__setitem__(key, value)
self.__dict__.update({key: value})
def __delitem__(self, key):
"""
        signature does not exactly match the parent's __delitem__, but it works fine here
"""
super().__delitem__(key)
del self.__dict__[key]
def update(self, m: Mapping = None, **kwargs):
"""
override dict's update method for Table: when updating mappings,
convert them into Table first rather than pass dict directly
"""
if m is not None:
for k, v in m.items() if isinstance(m, Mapping) else m:
if isinstance(v, dict):
v = OrderedTable(v)
self[k] = v
for k, v in kwargs.items():
if isinstance(v, dict):
v = OrderedTable(v)
self[k] = v
def append_keys(self, keys: Iterable, default=None):
for k in keys:
self.update({k: default})
def append(self, item=None, **kwargs):
"""
append one or multiple key-value pairs to the end of dict
the operation will be converted to update() if same key appears
"""
if item is not None:
if isinstance(item, list):
for i in item:
self.update(i)
elif isinstance(item, Mapping):
self.update(item)
for k, v in kwargs.items():
if isinstance(v, dict):
v = OrderedTable(v)
self[k] = v
def extend(self, items:Iterable):
"""
extend multiple key-value pairs to the end of dict
the operation will be converted to update() if same key appears
"""
for i in items:
self.update(i)
class CL:
"""
    ChainLightning provides a convenient function-chaining mechanism and also
    lets you modify the wrapped Iterable/Iterator directly at the same time
"""
def __init__(self, obj):
self.__obj = self.__assert_obj(obj)
self.__cache = copy.deepcopy(obj)
if isinstance(obj, Iterator):
self.type = 1 # type 1 is Iterator
else:
self.type = 2 # type 2 is Iterable
def __iter__(self):
if self.type == 1:
return copy.deepcopy(self.__obj)
return iter(self.__obj)
def __str__(self):
if self.type == 1:
self_obj = str(list(self))
else:
self_obj = str(self.__obj)
self_id = '0x{:02x}'.format(id(self))
if self.type == 1:
return f'<ChainLightning at {self_id}> value={self_obj} type=Iterator'
elif self.type == 2:
return f'<ChainLightning at {self_id}> value={self_obj} type=Iterable'
self_type = str(type(self.__obj))
return f'<ChainLightning at {self_id}> value={self_obj} type={self_type}'
def __repr__(self):
return str(self)
def __len__(self):
return self.reduce(func=lambda x, _: x + 1, initial=0)
def __enter__(self):
return iter(self)
def __call__(self, func_name: str, *args, **kwargs):
if hasattr(self, func_name):
return getattr(self, func_name)(*args, **kwargs)
def __getitem__(self, item):
"""
enable
>>> CL(iter([1, 2, 3, 4, 5]))[::2]
"""
if not isinstance(item, (int, slice)):
raise TypeError(
f'Indices must be an integer or slice, not {type(item)}'
)
# Iterable case, pass item directly
if self.type == 2:
return self.__obj.__getitem__(item)
# Iterator case
elif self.type == 1:
# slice case, "item" is a built-in slice object
if isinstance(item, slice):
res = islice(self, item.start, item.stop, item.step)
return res
# int case
else:
counter = 0
for i in self:
if counter == item:
return i
else:
counter += 1
else:
raise IndexError('Index out of range')
def __update_type_n_cache(self):
"""
after some operations an Iterator or Iterable might be converted
between each other, try to update type/cache after those changes
"""
self.__cache = copy.deepcopy(self.__obj)
if isinstance(self.__obj, Iterator):
self.type = 1
elif isinstance(self.__obj, Iterable):
self.type = 2
else:
self.type = 0
def __restore_from_cache(self):
"""
restore from cache for Iterators
"""
if self.type == 1:
self.__obj = copy.deepcopy(self.__cache)
def __assert_obj(self, obj):
try:
assert obj and isinstance(obj, (Iterator, Iterable))
except AssertionError:
obj = list(obj)
if isinstance(obj, dict) and not isinstance(obj, OrderedTable):
tmp = OrderedTable()
for key in obj:
tmp[key] = obj.get(key)
obj = tmp
return obj
def __assert_func(self, func, arg_num: int = None):
"""
assert whether the "func" is a valid function
if "arg_num" is given (not None), then assert if func's valid number of
parameters is legal
return: func's legal number of arguments
"""
if not callable(func):
raise TypeError('Argument "func" got a non-callable object')
# the given number of arg_num should pass one of the assert()
if arg_num is not None:
# _args is the total number of func's arguments
_args = len(inspect.getfullargspec(func).args)
# _kwargs is the total number of func's kw-arguments
try:
_kwargs = len(inspect.getfullargspec(func).defaults)
except:
_kwargs = 0
try:
assert _args - _kwargs == arg_num
return _args - _kwargs
except AssertionError:
try:
assert _args == arg_num
return _args
except AssertionError:
raise TypeError(
f'{func.__name__}() takes {_args} args and {_kwargs} kwargs, but expecting {arg_num} args'
)
###################### tools: ######################
# length() -> len(self)
# done() -> self.__obj
# get_iter() -> Iterator
# get_reverse() -> self.__obj
# get_sort() -> self.__obj
# has_duplicate() -> bool
# check() -> self.__obj
def length(self) -> int:
return len(self)
def done(self):
"""
retrieve self.__obj's value when user's operation is done, e.g.:
CL(xxx).sorted(x).map(x).filter(x).group_by(x).done()
"""
return self.__obj
def get_iter(self) -> Iterator:
"""
obtain an iterator from self
"""
return iter(self)
def get_reverse(self):
"""
obtain a reversed version of current object's value, and it won't
modify the object's value
"""
# an Iterable
if self.type == 2 and not isinstance(self.__obj, dict):
return reversed(self)
# an Iterator, not reversible, convert to list for reversed()
elif self.type == 1 and not isinstance(self.__obj, dict):
return reversed(list(self))
# a dict
elif isinstance(self.__obj, dict):
            # reversed(self) would fall back to the sequence protocol
            # (CL.__len__/__getitem__) and index with integer positions, which does
            # not suit a dict-backed object, so reverse the wrapped dict directly
res = reversed(self.__obj)
tmp = OrderedTable()
for key in res:
tmp[key] = self.__obj.get(key)
return tmp
def get_sort(self, key=None, reverse=None):
"""
obtain a sorted version of current object's value, and it won't
modify the object's value
"""
if key:
self.__assert_func(key, arg_num=1)
# sorted() with reverse flag
if reverse is not None and not isinstance(self.__obj, dict):
if not isinstance(reverse, (bool, int)):
raise TypeError(
f'Argument "reverse" should be in type of bool/int, but got {type(reverse)}'
)
return sorted(self, key=key, reverse=reverse)
# sorted without reverse flag
elif not reverse and not isinstance(self.__obj, dict):
return sorted(self, key=key)
# a dict
elif isinstance(self.__obj, dict):
if reverse is not None:
res = sorted(self, key=key, reverse=reverse)
else:
res = sorted(self, key=key)
tmp = OrderedTable()
for key in res:
tmp[key] = self.__obj.get(key)
return tmp
def get_rsort(self, key=None):
"""
obtain a reversely sorted version of current object's value, and it
won't modify the object's value
"""
return self.get_sort(key, reverse=True)
def has_duplicate(self):
"""
if there exists duplicate items
"""
return not (len(self) == len(set(self)))
def check(self, _type=None):
"""
enable a quick check to __obj's human-readable value
"""
if self.type == 1:
if _type is not None and callable(_type):
return _type(self)
return list(self)
else:
return self.__obj
###################### pipelines: ######################
# map() -> self
# zip() -> self
# filter() -> self
# sorted() -> self
# reversed() -> self
# flatten() -> self
#
# key_only() -> self
# value_only() -> self
#
# append() -> self
# extend() -> self
# insert() -> self
# remove() -> self
# pop() -> self
# update() -> self
#
# reset() -> self
def map(self, func) -> 'CL':
self.__assert_func(func, arg_num=1)
self.__obj = map(func, self)
self.__update_type_n_cache()
return self
def zip(self, obj=None) -> 'CL':
"""
zip current object with another Iterator/Iterable or None
"""
assert isinstance(obj, (Iterator, Iterable))
self.__obj = zip(self, obj)
self.__update_type_n_cache()
return self
def filter(self, func) -> 'CL':
self.__assert_func(func, arg_num=1)
self.__obj = filter(func, self)
self.__update_type_n_cache()
return self
def sorted(self, key=None, reverse=None) -> 'CL':
"""
doing sorted() operation, and it will modify object's value and
return self as a pipeline
"""
if key:
self.__assert_func(key, arg_num=1)
# sorted() with reverse flag
if reverse is not None and not isinstance(self.__obj, dict):
if not isinstance(reverse, (bool, int)):
raise TypeError(
f'Argument "reverse" should be in type of bool/int, but got {type(reverse)}'
)
self.__obj = sorted(self, key=key, reverse=reverse)
# sorted without reverse flag
elif not reverse and not isinstance(self.__obj, dict):
self.__obj = sorted(self, key=key)
# a dict
elif isinstance(self.__obj, dict):
if reverse is not None:
res = sorted(self, key=key, reverse=reverse)
else:
res = sorted(self, key=key)
tmp = OrderedTable()
for key in res:
tmp[key] = self.__obj.get(key)
self.__obj = tmp
self.__update_type_n_cache()
return self
def rsorted(self, key=None) -> 'CL':
return self.sorted(key, reverse=True)
def reversed(self) -> 'CL':
"""
doing reversed() operation, and it will modify object's value and
return itself as a pipeline
"""
# an Iterable
if self.type == 2 and not isinstance(self.__obj, dict):
# FIXME: why I cannot use reversed(self) in here?
self.__obj = reversed(self.__obj)
# an Iterator, not reversible, convert to list for reversed()
elif self.type == 1 and not isinstance(self.__obj, dict):
self.__obj = reversed(list(self))
# a dict
elif isinstance(self.__obj, dict):
            # reversed(self) would fall back to the sequence protocol
            # (CL.__len__/__getitem__) and index with integer positions, which does
            # not suit a dict-backed object, so reverse the wrapped dict directly
res = reversed(self.__obj)
tmp = OrderedTable()
for key in res:
tmp[key] = self.__obj.get(key)
self.__obj = tmp
self.__update_type_n_cache()
return self
def __flatten(self, obj):
for i in obj:
if isinstance(i, Iterable) and not isinstance(i, (str, bytes)):
yield from self.__flatten(i)
else:
yield i
def flatten(self) -> 'CL':
"""
make object "flat" and return itself as a pipeline
CL([1,2,[3,4,[5,6],7,[8,9]]]).flatten() => CL([1,2,3,4,5,6,7,8,9])
"""
self.__obj = iter([_ for _ in self.__flatten(self)])
self.__update_type_n_cache()
return self
def key_only(self) -> 'CL':
"""
only work when object's value is a dict and return itself as a pipeline
"""
if isinstance(self.__obj, dict):
self.__obj = [_ for _ in self]
self.__update_type_n_cache()
return self
def value_only(self) -> 'CL':
"""
only work when object's value is a dict and return itself as a pipeline
"""
if isinstance(self.__obj, dict):
self.__obj = [self.__obj.get(_) for _ in self]
self.__update_type_n_cache()
return self
def append(self, item) -> 'CL':
"""
append an item to the end of Iterable/Iterator/Dict
"""
# an Iterable or a dict
if self.type == 2:
self.__obj.append(item)
# an Iterator
elif self.type == 1:
tmp = list(self.__obj)
tmp.append(item)
self.__obj = iter(tmp)
return self
def extend(self, items) -> 'CL':
"""
extend a list of items to the end of Iterable/Iterator/Dict
"""
if not isinstance(items, (list, tuple, dict)):
try:
items = list(items)
except:
return self
# an Iterable or a dict
if self.type == 2:
self.__obj.extend(items)
# an Iterator
elif self.type == 1:
tmp = list(self.__obj)
tmp.extend(items)
self.__obj = iter(tmp)
return self
def insert(self) -> 'CL':
"""
insert an item to designated position of Iterable/Iterator/Dict
"""
return self
def remove(self) -> 'CL':
"""
remove an item from designated position of Iterable/Iterator/Dict
"""
return self
def pop(self) -> 'CL':
"""
pop an item from designated position of Iterable/Iterator/Dict
"""
return self
def update(self) -> 'CL':
"""
update an item to designated position of Iterable/Iterator/Dict
"""
return self
def reset(self, obj=None) -> 'CL':
"""
reset object's value if error occurs
e.g.:
>>> t = CL([1,2,3])
>>> t.map(lambda x: x[0])
>>> print(t)
it will continuously raise TypeError whenever you call the object, as
closure of map() only invokes when object is being called, the wrong
lambda function will be stored in memory until the map iterator has
been consumed
in this case use reset() to reset object's value
"""
self.__obj = self.__assert_obj(obj)
self.__update_type_n_cache()
return self
###################### basic operations: ######################
# any() -> res
# all() -> res
# reduce() -> res
# foldl() -> res
# foldr() -> res
def any(self, func=None) -> bool:
if not func:
res = any(self)
else:
self.__assert_func(func, arg_num=1)
res = any(map(func, self))
return res
def all(self, func=None) -> bool:
if not func:
res = all(self)
else:
self.__assert_func(func, arg_num=1)
res = all(map(func, self))
return res
def reduce(self, func, initial=None):
"""
reduce / foldl method
"""
self.__assert_func(func, arg_num=2)
        if initial is None:
            # functools.reduce treats an explicit None initializer as a real
            # initial value, so only pass it through when one was supplied
            return reduce(func, self)
        return reduce(func, self, initial)
def fold_left(self, func, initial=None):
return self.reduce(func, initial)
def foldl(self, func, initial=None):
return self.reduce(func, initial)
def fold_right(self, func, initial=None):
"""
r_reduce / foldr method
"""
self.__assert_func(func, arg_num=2)
        if initial is None:
            return reduce(func, self.reversed())
        return reduce(func, self.reversed(), initial)
def foldr(self, func, initial=None):
return self.fold_right(func, initial)
###################### math operations: ######################
# sum() -> res
# average() -> res
# max() -> res
# min() -> res
def sum(self):
try:
return self.reduce(func=lambda x, y: x + y)
        except TypeError:
return None
    def average(self):
        total = self.sum()
        if isinstance(total, (int, float, complex)):
            return total / len(self)
        else:
            return None
def max(self, func=None, min: bool = False):
"""
can accept a function as argument for comparing a "max" value
        e.g.: max(func=lambda x: len(x)) to find the longest string in an
Iterator/Iterable
if "min" is set to True, then it will return a minimum value
"""
res = None
cmp = None
if func is not None:
self.__assert_func(func, arg_num=1)
else:
func = lambda x: x
for item in self:
try:
if res is None and cmp is None:
res = item
cmp = func(item)
else:
new_cmp = func(item)
if min:
if new_cmp < cmp:
res = item
cmp = new_cmp
else:
if new_cmp > cmp:
res = item
cmp = new_cmp
            except TypeError:
return None
return res
def min(self, func=None):
"""
can accept a function as argument for comparing a "min" value
        e.g.: min(func=lambda x: len(x)) to find the shortest string in an
Iterator/Iterable
"""
return self.max(func=func, min=True)
    ###################### advanced operations: ######################
# sort_by()
# group_by()
# count_by()
# distinct_by()
# flatten()
# first()
# first_not()
def sort_by(self):
pass
    def group_by(self, key=None, attr=None, func=None):
        if key is None and func is None and attr is None:
            raise ValueError(
                'CL.group_by() requires at least one of key, attr or func')
res = {}
if key:
pass
elif attr:
pass
else:
pass
return res
def count_by(self):
pass
def distinct_by(self):
pass
if __name__ == '__main__':
t = CL([1,2,3,4,5])
t.append('fdsafsdfdsf')
print(t)
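    # --- Editor's sketch (not in the original file) ---------------------------
    # A few extra usage examples for the pipeline API defined above; they assume
    # CL is constructed from a plain list exactly as in the demo lines above.
    print(CL([1, 2, [3, [4, 5]]]).flatten())                  # nested -> flat pipeline
    print(CL([1, 2, 3, 4]).reduce(lambda a, b: a + b, 0))     # foldl-style reduction -> 10
    print(CL(['aa', 'b', 'ccc']).max(func=lambda s: len(s)))  # longest string -> 'ccc'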
| 29.506849 | 114 | 0.516806 |
4a2312d5aad749a7ae10fc8daa93546207067ebd | 453 | py | Python | eventkit_cloud/jobs/migrations/0014_dataprovidertype_use_bbox.py | zta6/eventkit-cloud | a9e1aaa2bbfd3d11d3cf3df91e413e6220d6e876 | [
"BSD-3-Clause"
] | 9 | 2019-02-27T19:42:02.000Z | 2021-05-09T14:16:28.000Z | eventkit_cloud/jobs/migrations/0014_dataprovidertype_use_bbox.py | zta6/eventkit-cloud | a9e1aaa2bbfd3d11d3cf3df91e413e6220d6e876 | [
"BSD-3-Clause"
] | 9 | 2019-05-14T01:23:30.000Z | 2021-05-26T07:53:09.000Z | eventkit_cloud/jobs/migrations/0014_dataprovidertype_use_bbox.py | zta6/eventkit-cloud | a9e1aaa2bbfd3d11d3cf3df91e413e6220d6e876 | [
"BSD-3-Clause"
] | 3 | 2019-04-24T07:09:54.000Z | 2021-04-14T02:42:53.000Z | # Generated by Django 3.1.2 on 2021-05-04 14:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0013_add_ogc_process_provider'),
]
operations = [
migrations.AddField(
model_name='dataprovidertype',
name='use_bbox',
field=models.BooleanField(default=False, verbose_name='Use bounding box to calculate area'),
),
]
| 23.842105 | 104 | 0.635762 |
4a231386e563f3729a208f7d303d54e6e383de34 | 6,547 | py | Python | pynet/models/CoRA.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/models/CoRA.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/models/CoRA.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
def passthrough(x, **kwargs):
return x
def ELUCons(elu, nchan):
if elu:
return nn.LeakyReLU(inplace=True)
else:
return nn.PReLU(nchan)
# normalization between sub-volumes is necessary
# for good performance
class ContBatchNorm3d(nn.modules.batchnorm._BatchNorm):
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
# super(ContBatchNorm3d, self)._check_input_dim(input)
def forward(self, input):
self._check_input_dim(input)
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
True, self.momentum, self.eps)
class LUConv(nn.Module):
def __init__(self, nchan, elu):
super(LUConv, self).__init__()
self.relu1 = ELUCons(elu, nchan)
self.conv1 = nn.Conv3d(nchan, nchan, kernel_size=5, padding=2, bias=False)
self.bn1 = ContBatchNorm3d(nchan)
def forward(self, x):
out = self.relu1(self.bn1(self.conv1(x)))
return out
def _make_nConv(nchan, depth, elu):
layers = []
for _ in range(depth):
layers.append(LUConv(nchan, elu))
return nn.Sequential(*layers)
class InputTransition(nn.Module):
def __init__(self, inChans, elu):
super(InputTransition, self).__init__()
self.conv1 = nn.Conv3d(inChans, 16, kernel_size=5, padding=2, bias=False)
self.bn1 = ContBatchNorm3d(16)
self.relu1 = ELUCons(elu, 16)
def forward(self, x):
# do we want a PRELU here as well?
out = self.bn1(self.conv1(x))
# split input in to 16 channels
x16 = torch.cat((x, x, x, x, x, x, x, x,
x, x, x, x, x, x, x, x), 1)
out = self.relu1(torch.add(out, x16))
return out
class DownTransition(nn.Module):
def __init__(self, inChans, nConvs, elu, dropout=False):
super(DownTransition, self).__init__()
outChans = 2*inChans
self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2, bias=False)
self.bn1 = ContBatchNorm3d(outChans)
self.do1 = passthrough
self.relu1 = ELUCons(elu, outChans)
self.relu2 = ELUCons(elu, outChans)
if dropout:
self.do1 = nn.Dropout3d()
self.ops = _make_nConv(outChans, nConvs, elu)
def forward(self, x):
down = self.relu1(self.bn1(self.down_conv(x)))
out = self.do1(down)
out = self.ops(out)
out = self.relu2(torch.add(out, down))
return out
class UpTransition(nn.Module):
def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
super(UpTransition, self).__init__()
self.up_conv = nn.ConvTranspose3d(inChans, outChans, kernel_size=2, stride=2, bias=False)
self.relu1 = ELUCons(elu, outChans)
self.bn1 = ContBatchNorm3d(outChans)
self.do1 = passthrough
self.do2 = nn.Dropout3d()
self.relu2 = ELUCons(elu, outChans)
if dropout:
self.do1 = nn.Dropout3d()
self.ops = _make_nConv(outChans, nConvs, elu)
def forward(self, x, skipx):
out = self.do1(x)
#skipxdo = self.do2(skipx)
out = self.relu1(self.bn1(self.up_conv(out)))
# print(out.shape, skipxdo.shape)
xcat = out
out = self.ops(xcat)
out = self.relu2(torch.add(out, xcat))
return out
class OutputTransition(nn.Module):
def __init__(self, inChans, outChans, elu):
super(OutputTransition, self).__init__()
self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2, bias=False)
self.bn1 = ContBatchNorm3d(2)
self.conv2 = nn.Conv3d(2, outChans, kernel_size=1, bias=True)
self.relu1 = ELUCons(elu, outChans)
def forward(self, x):
# convolve 32 down to 2 channels
out = self.relu1(self.bn1(self.conv1(x)))
out = self.conv2(out)
return out
class CoRA(nn.Module):
# the number of convolutions in each layer corresponds
# to what is in the actual prototxt, not the intent
def __init__(self, inChannels, outChannels, elu=True, with_labels=False):
super(CoRA, self).__init__()
self.with_labels = with_labels
# Encoder 1
self.in_tr = InputTransition(inChannels, elu)
self.down_tr32 = DownTransition(16, 1, elu)
self.down_tr64 = DownTransition(32, 2, elu)
self.down_tr128 = DownTransition(64, 3, elu, dropout=True)
self.down_tr256 = DownTransition(128, 2, elu, dropout=True)
# Decoder 1
self.up_tr256 = UpTransition(256, 128, 2, elu, dropout=True)
self.up_tr128 = UpTransition(128, 64, 2, elu, dropout=True)
self.up_tr64 = UpTransition(64, 32, 1, elu)
self.up_tr32 = UpTransition(32, 16, 1, elu)
self.out_tr = OutputTransition(16, outChannels, elu)
if self.with_labels:
# Decoder 2
self.up_tr256_2 = UpTransition(256, 128, 2, elu, dropout=True)
self.up_tr128_2 = UpTransition(128, 64, 2, elu, dropout=True)
self.up_tr64_2 = UpTransition(64, 32, 1, elu)
self.up_tr32_2 = UpTransition(32, 16, 1, elu)
self.out_tr_2 = OutputTransition(16, outChannels, elu)
self.rec = nn.MSELoss()
def forward(self, x, label=None):
self.input = x # dim == (inChannels, H, W, D)
self.out16 = self.in_tr(x) # dim == (16, H, W, D)
self.out32 = self.down_tr32(self.out16) # (32, H//2, W//2, D//2)
self.out64 = self.down_tr64(self.out32) # (64, H//4, W//4, D//4)
self.out128 = self.down_tr128(self.out64) # (128, H//8, W//8, D//8)
out256 = self.down_tr256(self.out128) # (256, H//16, W//16, D//16)
self.up128 = self.up_tr256(out256, self.out128) # (128, H//8, W//8, D//8)
self.up64 = self.up_tr128(self.up128, self.out64) # (64, H//4, W//4, D//4)
self.up32 = self.up_tr64(self.up64, self.out32) # (32, H//2, W//2, D//2)
self.up16 = self.up_tr32(self.up32, self.out16) # (16, H, W, D)
        self.out = self.out_tr(self.up16) # (outChannels, H, W, D)
return self.out
def rec_loss(self, *args, **kwargs):
return self.rec(self.up128, self.out128) + self.rec(self.up64, self.out64) + self.rec(self.up32, self.out32) + \
self.rec(self.up16, self.out16) + self.rec(self.input, self.out)
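# --- Editor's sketch (not part of the original file) --------------------------
# Minimal smoke test for the network above, assuming a single-channel 3D volume
# whose spatial sides are divisible by 16 (four stride-2 downsamplings) and a
# single output channel so the reconstruction loss shapes match the input.
if __name__ == "__main__":
    net = CoRA(inChannels=1, outChannels=1)
    volume = torch.randn(2, 1, 64, 64, 64)      # (N, C, H, W, D)
    out = net(volume)                            # -> (2, 1, 64, 64, 64)
    print(out.shape, net.rec_loss().item())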
| 35.389189 | 120 | 0.608676 |
4a23147a4630c083a1cf2db91864eb8ad3a03d17 | 2,014 | py | Python | pcat2py/class/22326cdc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/22326cdc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/22326cdc-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 22326cdc-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "22326cdc-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Execute command and parse capture standard output
stdout = cli.system("grep pam_cracklib /etc/pam.d/system-auth")
# Split output lines
self.output = stdout.split('\n')
# Process standard output
for line in self.output:
if line.startswith("password") and "maxrepeat=3" in line:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
# Desired PAM setting
value_pair = "maxrepeat=3"
sub_string = value_pair.split("=")
name = sub_string[0]
# Execute command and parse capture standard output
stdout = cli.system("grep pam_cracklib /etc/pam.d/system-auth")
# Split output lines
output = stdout.split('\n')
# Process standard output
for line in output:
password_pam_cracklib_line = ""
if line.startswith("password"):
sub_string = (line.strip()).split(" ")
for string in sub_string:
if name not in string:
password_pam_cracklib_line += string + " "
password_pam_cracklib_line += value_pair
cli.system("sed -i 's/.*pam_cracklib.*/" + password_pam_cracklib_line + "/' /etc/pam.d/system-auth")
| 33.016393 | 120 | 0.518868 |
4a2314b0c4c4cc7ba108544d4685aaec852e2d94 | 10,476 | py | Python | sensor/optimization_stock_weight_v0.py | jokbull/benew_model | 12dfcfcf48a8fd14d62faee5e79e9ac83b0f02d7 | [
"MIT"
] | 1 | 2019-05-27T02:13:47.000Z | 2019-05-27T02:13:47.000Z | sensor/optimization_stock_weight_v0.py | jokbull/benew_model | 12dfcfcf48a8fd14d62faee5e79e9ac83b0f02d7 | [
"MIT"
] | null | null | null | sensor/optimization_stock_weight_v0.py | jokbull/benew_model | 12dfcfcf48a8fd14d62faee5e79e9ac83b0f02d7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @author: scrat
import numpy as np
from cvxopt import solvers, matrix
from collider.data.sensor import Sensor
class OptimizationStockWeight(Sensor):
@property
def output_variables(self):
return ["targetWeight"]
def do(self, date, mp, **kwargs):
        # region read input parameters
        # optimizer parameters
lambdax = kwargs.get("lambdax", 1) # lambda = 0.5?
        tc = kwargs.get("tc", 0.003)  # assumed transaction cost
        n = kwargs.get("top", 200)  # the top n stocks enter the optimizer
        single_max = kwargs.get("single_max", 0.02)  # maximum weight of a single stock
# benchmark weight
weight_index = kwargs.get("benchmark_weight","weight_index_500")
        # alpha factor matrix
column = mp.alphaName
exog = mp.alphaExposure
        # industry / style factor matrix
risk_column = mp.riskName
risk_factor = mp.riskExposure
        # factor covariance matrix
cov = mp.factorCovariance
        # idiosyncratic (specific) risk
if hasattr(mp, "sp_risk"):
sp = mp.sp_risk
else:
sp = np.zeros_like(mp.stockReturn)
        # suspended stocks; non_suspend is all True/False, with no NaN
is_suspend = kwargs.get("is_susp", np.full(mp.stockReturn.size, 0))
non_suspend = is_suspend == 0
        # compute the benchmark factor exposures
benchmark_exposure = mp.data_manager.get_bar(date=mp.date, columns=[weight_index])[weight_index]
benchmark_exposure = np.nan_to_num(benchmark_exposure) / np.nansum(benchmark_exposure)
benchmark_expo = np.dot(benchmark_exposure, np.nan_to_num(risk_factor))
# endregion
success = False
while (not success) and n < 1500:
stock_return = mp.stockReturn.copy()
stock_return[np.any(np.isnan(exog), axis=1)] = np.nan
            # region build the set of stocks that enters the optimization
            # 1. compute top_flag within mp.pool
            # 2. holding | top_flag
            # 3. no missing factor values
            # step 1. compute top_flag within mp.pool
            stock_return[~mp.pool] = np.nan  # recomputed on every pass of the while-loop, but kept inside the loop for readability
non_nan_cnt = np.sum(~np.isnan(stock_return))
if non_nan_cnt < n:
self.logger.warning("non_nan_cnt(%s) < n(%s)" % (non_nan_cnt, n))
n = non_nan_cnt
return_ordered_index = np.argsort(-stock_return)[:non_nan_cnt]
top_flag = np.full(stock_return.size, False, dtype=bool)
top_flag[return_ordered_index[:n]] = True
            # # candidates contains no NaN
            # candidates = (mp.weight > 0) | top_flag
            # # how the latest factors or styles can be missing: stocks in top_flag have a
            # # stock_return, so style and alpha factors are never missing; held stocks may be missing data, e.g. after a long suspension.
# candidates &= ~np.any(np.isnan(exog), axis=1)
# candidates &= ~np.any(np.isnan(risk_factor), axis=1)
# candidates &= ~np.isnan(sp)
            candidates = top_flag.copy()  # stocks in top_flag always have a predicted_stock_return, so their data is never missing
            # remove the remaining NaN cases from candidates
            # case 1. special_risk is NaN
            # candidates &= ~np.isnan(sp)
            # the following cases still have to be added to candidates
            # case 1. held AND has a stock_return (i.e. data is not missing)
candidates |= (mp.weight > 0) & (~np.isnan(stock_return))
            # held AND suspended AND data missing
holding_suspend = (mp.weight > 0) & (is_suspend == 1) & (np.isnan(stock_return))
holding_suspend_sum = np.sum(mp.weight[holding_suspend])
candidates_cnt = np.nansum(candidates)
# endregion
            # everything below operates on vectors of length candidates_cnt.
# risk_matrix = risk_factor[candidates]
x = exog[candidates]
w0 = mp.weight[candidates]
if any(holding_suspend):
for ix, _ in enumerate(holding_suspend):
if _:
if any(np.isnan(exog[ix])):
self.logger.warn("Holding %s have nan factors %s" % (mp.data_manager.codes[ix], column[np.isnan(exog[ix]).ravel()]))
if any(np.isnan(risk_factor[ix])):
self.logger.warn("Holding %s have nan factors %s" % (mp.data_manager.codes[ix], risk_column[np.isnan(risk_factor[ix]).ravel()]))
r = stock_return[candidates]
sp_diag = np.diagflat(sp[candidates])
            # region build the equality constraints
A_list = []
b_list = []
            # equality constraints:
            # 1. the weights sum to 1
A_list.append(np.ones(shape=(1, candidates_cnt), dtype=np.double))
b_list.append(np.array([1 - holding_suspend_sum]))
# b_list.append(np.array([1]))
            # 2. lock the weights of suspended stocks (held positions cannot be traded, unheld ones cannot be bought)
weight_locked = (candidates & ~non_suspend)[candidates]
if any(weight_locked):
                weight_locked_cnt = np.nansum(weight_locked)  # number of stocks whose weight must be locked
a_mat = np.zeros(shape=(weight_locked_cnt, candidates_cnt), dtype=np.double)
for i, j in zip(range(weight_locked_cnt), np.arange(candidates_cnt)[weight_locked]):
a_mat[i, j] = 1
A_list.append(a_mat)
                b_list.append(w0[weight_locked])  # weights of the suspended stocks
# endregion
            # region build the inequality constraints
G_list = []
h_list = []
            # 1. for non-suspended stocks, the weight of a single stock cannot exceed single_max
wmax = np.full(candidates_cnt, single_max)
            # cap each stock's weight at wmax
neq_left_1 = np.eye(candidates_cnt)
neq_right_1 = wmax
G_list.append(neq_left_1[~weight_locked])
h_list.append(neq_right_1[~weight_locked])
            # 2. long-only: individual weights must be non-negative
neq_left_2 = -np.eye(candidates_cnt)
neq_right_2 = np.zeros(candidates_cnt)
G_list.append(neq_left_2[~weight_locked])
h_list.append(neq_right_2[~weight_locked])
            # 3. industry/style exposure constraints: upper bound relative to the benchmark
risk_condition = kwargs.get("risk_condition", {"up": {}, "down": {}})
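            # Editor's note (not in the original code): risk_condition is expected to
            # look like {"up": {<risk factor name>: <max exposure above benchmark>},
            #            "down": {<risk factor name>: <max exposure below benchmark>}},
            # where the factor names must match entries of mp.riskName (risk_column).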
            # build the exposure constraints
for k, v in risk_condition['up'].items():
col_index = risk_column == k
w = risk_factor[candidates][:, col_index]
G_list.append(w.T)
h_list.append(np.array([benchmark_expo[col_index] + v]))
            # 4. industry/style exposure constraints: lower bound relative to the index
            # style constraints
for k, v in risk_condition['down'].items():
col_index = risk_column == k
w = risk_factor[candidates][:, col_index]
G_list.append(-w.T)
h_list.append(np.array([- benchmark_expo[col_index] + v]))
# endregion
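            # Editor's note (not in the original code): cvxopt.solvers.qp solves
            #     minimize   (1/2) x' P x + q' x
            #     subject to G x <= h  and  A x = b,
            # so below P carries the factor-model risk 2*lambdax*(X*Cov*X' + D_sp),
            # q the negated expected returns, A/b the budget and locked-weight
            # equalities, and G/h the box and exposure inequalities.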
            # region two-step optimization that accounts for transaction costs
# z = np.maximum(wmax - w0, w0)
#
# commission_part1_left = np.eye(candidates_cnt)
# commission_part1_right = z + w0
#
# commission_part2_left = -np.eye(candidates_cnt)
# commission_part2_right = z - w0
#
# G_list.append(commission_part1_left)
# h_list.append(commission_part1_right)
#
# G_list.append(commission_part2_left)
# h_list.append(commission_part2_right)
q = matrix(-stock_return[candidates])
P = matrix(2 * lambdax * (np.dot(np.dot(x, cov), x.T) + sp_diag))
A = matrix([matrix(a) for a in A_list], tc="d")
b = matrix([matrix(b) for b in b_list], tc="d")
G = matrix([matrix(g) for g in G_list], tc="d")
h = matrix([matrix(h) for h in h_list], tc="d")
try:
res = solvers.qp(P=P, q=q, G=G, h=h, A=A, b=b, **kwargs)
# kktsolver="ldl",
# options={"show_progress": False, "maxiters": maxiters})
if res["status"] != "optimal":
self.logger.warn("Stage 1 optimization failed at top %s, %s" % (n, date))
if n == non_nan_cnt:
raise Exception("non_nan_cnt(%s) is few at %s." % (n, date))
n += 300
else:
self.logger.trace("Stage 1 optimization succeed at top %s" % n)
weights = np.ravel(res["x"])
                    # constraints for the second optimization:
                    # the equality constraints stay the same,
                    # expected returns are taken net of transaction costs
cost = np.where((weights - w0) > 0, tc, -tc)
q = matrix(-r + cost, tc="d")
                    # stocks the first solve decided to buy are restricted to weights in [w1, wmax]
                    # stocks the first solve decided to sell are restricted to weights in [0, w1]
'''
neq_left_1 = np.eye(candidates_cnt)
neq_right_1 = wmax
G_list.append(neq_left_1[~weight_locked])
h_list.append(neq_right_1[~weight_locked])
'''
                    # stocks the first solve decided to sell
opt_neq_left_1 = np.eye(candidates_cnt)
opt_neq_right_1 = w0
sell_codes = cost < 0
G_list.append(opt_neq_left_1[sell_codes])
h_list.append(opt_neq_right_1[sell_codes])
                    # stocks the first solve decided to buy
opt_neq_left_2 = -np.eye(candidates_cnt)
opt_neq_right_2 = -w0
G_list.append(opt_neq_left_2[~sell_codes])
h_list.append(opt_neq_right_2[~sell_codes])
G = matrix([matrix(g) for g in G_list], tc="d")
h = matrix([matrix(h) for h in h_list], tc="d")
res = solvers.qp(P=P, q=q, G=G, h=h, A=A, b=b, **kwargs)
if res["status"] == "optimal":
self.logger.trace("Stage 2 optimization succeed at top %s" % n)
success = True
else:
if n == non_nan_cnt:
raise Exception("non_nan_cnt(%s) is few at %s." % (n, date))
self.logger.warn("Stage 2 optimization failed at top %s, %s" % (n, date))
n += 300
except Exception as e:
self.logger.exception(e)
break
# endregion
if success:
# fill to the fixed length
target_weight = np.full(stock_return.size, 0, dtype=np.double)
target_weight[holding_suspend] = mp.weight[holding_suspend]
target_weight[candidates] = np.round(res["x"], 6).ravel()
else:
            # optimization failed: keep the current holdings unchanged
self.logger.warn("No optimize solution, keep holding. %s" % date)
target_weight = mp.weight
return target_weight,
| 37.414286 | 156 | 0.524723 |
4a2317e31d84ee633eb1f64926f7f757502123df | 592 | py | Python | decorate_args/main.py | FlynnOwen/Python-utils | 7d720c6654aba69115b5bc5cdfd8052d93a438b6 | [
"MIT"
] | 2 | 2022-03-27T03:51:02.000Z | 2022-03-27T09:03:22.000Z | decorate_args/main.py | FlynnOwen/Python-utils | 7d720c6654aba69115b5bc5cdfd8052d93a438b6 | [
"MIT"
] | null | null | null | decorate_args/main.py | FlynnOwen/Python-utils | 7d720c6654aba69115b5bc5cdfd8052d93a438b6 | [
"MIT"
] | null | null | null | def prefix_decorator(prefix):
def decorator_function(original_function):
def wrapper_function(*args, **kwargs):
print(prefix, 'Executed before', original_function.__name__)
result = original_function(*args, **kwargs)
print(prefix, 'Executed after', original_function.__name__, '\n')
return result
return wrapper_function
return decorator_function
@prefix_decorator('Prefix!')
def display_info(name, age):
print(f'display_info ran with arguments ({name}, {age})')
display_info('John', 25)
display_info('Rod', 35)
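# --- Editor's sketch (not part of the original file) --------------------------
# The same parametrised decorator written with functools.wraps, so the wrapped
# function keeps its __name__ and docstring; usage is identical to the decorator
# defined above.
import functools

def prefix_decorator_wrapped(prefix):
    def decorator_function(original_function):
        @functools.wraps(original_function)
        def wrapper_function(*args, **kwargs):
            print(prefix, 'Executed before', original_function.__name__)
            result = original_function(*args, **kwargs)
            print(prefix, 'Executed after', original_function.__name__, '\n')
            return result
        return wrapper_function
    return decorator_function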
| 31.157895 | 77 | 0.680743 |
4a2319e0236c6cd5771ac6ca88cf8b5573e07ab9 | 362 | py | Python | ex_08_05/ex_08_05.py | ColinTing/Python-Specialization | 7a1b096362526d3574e9d787c9a63d3d13686608 | [
"MIT"
] | 2 | 2019-05-04T12:25:34.000Z | 2019-05-04T15:18:03.000Z | ex_08_05/ex_08_05.py | ColinTing/Python-for-Everybody-Specialization | 7a1b096362526d3574e9d787c9a63d3d13686608 | [
"MIT"
] | null | null | null | ex_08_05/ex_08_05.py | ColinTing/Python-for-Everybody-Specialization | 7a1b096362526d3574e9d787c9a63d3d13686608 | [
"MIT"
] | null | null | null | fname = input("Enter file name: ")
if len(fname) < 1 :
fname = "mbox-short.txt"
fh = open(fname)
count = 0
#lst = list()
for line in fh:
line = line.rstrip()
lst = line.split()
if len(lst)>1 and lst[0] == "From":
count = count + 1
print(lst[1])
print("There were", count, "lines in the file with From as the first word")
| 24.133333 | 75 | 0.571823 |
4a231a29a5b6e41b53cc9186da387e20c4b3fd58 | 647 | py | Python | anno/views.py | voelkerb/annoticity | 8c99fcd5c42b8b170bf54eaf7474db661eb9b45d | [
"CC0-1.0"
] | 5 | 2021-03-08T16:54:19.000Z | 2021-10-02T08:14:38.000Z | anno/views.py | voelkerb/annoticity | 8c99fcd5c42b8b170bf54eaf7474db661eb9b45d | [
"CC0-1.0"
] | null | null | null | anno/views.py | voelkerb/annoticity | 8c99fcd5c42b8b170bf54eaf7474db661eb9b45d | [
"CC0-1.0"
] | null | null | null | from django.shortcuts import render
import json
from . import redd
from . import fired
from . import blond
from . import eco
from . import ukdale
def index(request):
""" view function for sales app """
reddInfo = redd.info()
firedInfo = fired.info()
blondInfo = blond.info()
ecoInfo = eco.info()
ukdaleInfo = ukdale.info()
context = {"FIREDInfo": json.dumps(firedInfo), "navbar":"FIRED", "UKDALEInfo": json.dumps(ukdaleInfo), "ECOInfo": json.dumps(ecoInfo), "BLONDInfo": json.dumps(blondInfo), "REDDInfo": json.dumps(reddInfo),'use_seconds':False}
return render(request, 'eventLabeling.html', context=context)
| 29.409091 | 228 | 0.700155 |
4a231a7ea031355ecb03dda9d780f2cbde05bd97 | 4,479 | py | Python | pocs/tests/test_config.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | 1 | 2019-07-19T10:37:08.000Z | 2019-07-19T10:37:08.000Z | pocs/tests/test_config.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | pocs/tests/test_config.py | zacharyt20/POCS | 8f785eaf27178be7d72106cb82e5400a8b852ba8 | [
"MIT"
] | null | null | null | import os
import pytest
import uuid
import yaml
from astropy import units as u
from pocs.utils.config import load_config
from pocs.utils.config import save_config
def test_load_simulator(config):
assert 'camera' in config['simulator']
assert 'mount' in config['simulator']
assert 'weather' in config['simulator']
assert 'night' in config['simulator']
def test_no_overwrite(config):
with pytest.warns(UserWarning):
save_config('pocs', config, overwrite=False)
def test_overwrite(config):
config01 = {
'foo': 'bar'
}
config02 = {
'bar': 'foo'
}
assert config01 != config02
save_config('foo', config01)
config03 = load_config('foo')
assert config01 == config03
save_config('foo', config02)
config04 = load_config('foo')
assert config02 == config04
assert config01 != config04
conf_fn = '{}/conf_files/foo.yaml'.format(os.getenv('POCS'))
os.remove(conf_fn)
assert os.path.exists(conf_fn) is False
def test_full_path():
temp_config_path = '/tmp/{}.yaml'.format(uuid.uuid4())
temp_config = {'foo': 42}
save_config(temp_config_path, temp_config)
c = load_config(temp_config_path)
assert c == temp_config
os.remove(temp_config_path)
def test_local_config():
_local_config_file = '{}/conf_files/pocs_local.yaml'.format(os.getenv('POCS'))
if not os.path.exists(_local_config_file):
conf = load_config(ignore_local=True)
assert conf['name'] == 'Generic PANOPTES Unit'
local_yaml = {
'name': 'ConfTestName'
}
with open(_local_config_file, 'w') as f:
f.write(yaml.dump(local_yaml))
conf = load_config()
assert conf['name'] != 'Generic PANOPTES Unit'
os.remove(_local_config_file)
else:
conf = load_config()
assert conf['name'] != 'Generic PANOPTES Unit'
def test_multiple_config():
config01 = {'foo': 1}
config02 = {'foo': 2, 'bar': 42}
config03 = {'bam': 'boo'}
assert config01 != config02
f01 = str(uuid.uuid4())
f02 = str(uuid.uuid4())
f03 = str(uuid.uuid4())
save_config(f01, config01)
save_config(f02, config02)
save_config(f03, config03)
config04 = load_config(f01)
config05 = load_config(f02)
config06 = load_config(f03)
assert config01 == config04
assert config02 == config05
assert config03 == config06
config07 = load_config([f01, f02], ignore_local=True)
config08 = load_config([f02, f01], ignore_local=True)
assert config07 != config01
assert config07 == config02
assert config08 != config01
assert config08 != config02
assert config08 != config05
assert 'foo' not in config06
assert 'bar' not in config06
assert 'foo' in config05
assert 'foo' in config07
assert 'foo' in config08
assert 'bar' in config05
assert 'bar' in config07
assert 'bar' in config08
assert 'bam' in config06
assert config07['foo'] == 2
assert config08['foo'] == 1
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f01))
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f02))
os.remove('{}/conf_files/{}.yaml'.format(os.getenv('POCS'), f03))
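# Editor's note (not in the original file): the assertions above pin down the
# merge rule of load_config() for a list of files - when the same key appears
# in several configs, the value from the file listed last wins.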
def test_no_config():
# Move existing config to temp
_config_file = '{}/conf_files/pocs.yaml'.format(os.getenv('POCS'))
_config_file_temp = '{}/conf_files/pocs_temp.yaml'.format(os.getenv('POCS'))
os.rename(_config_file, _config_file_temp)
config = load_config(ignore_local=True)
assert len(config.keys()) == 0
os.rename(_config_file_temp, _config_file)
def test_parse(config):
lat = config['location']['latitude']
assert isinstance(lat, u.Quantity)
def test_no_parse():
config = load_config(parse=False, ignore_local=True)
lat = config['location']['latitude']
assert isinstance(lat, u.Quantity) is False
assert isinstance(lat, float)
def test_location_latitude(config):
lat = config['location']['latitude']
assert lat >= -90 * u.degree and lat <= 90 * u.degree
def test_location_longitude(config):
lat = config['location']['longitude']
assert lat >= -360 * u.degree and lat <= 360 * u.degree
def test_location_positive_elevation(config):
elev = config['location']['elevation']
assert elev >= 0.0 * u.meter
def test_directories(config):
assert config['directories']['data'] == os.path.join(os.getenv('PANDIR'), 'data')
| 25.448864 | 85 | 0.658406 |
4a231b1a5fd4d69717fce7ec7c00535eccdeb34b | 4,792 | py | Python | inter_iit.py | ISROMP/MP_ISRO_T13 | 6cde7c4866a47a2819197f82e83d77735661e5d0 | [
"MIT"
] | null | null | null | inter_iit.py | ISROMP/MP_ISRO_T13 | 6cde7c4866a47a2819197f82e83d77735661e5d0 | [
"MIT"
] | null | null | null | inter_iit.py | ISROMP/MP_ISRO_T13 | 6cde7c4866a47a2819197f82e83d77735661e5d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""inter_iit.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1mrxMKcojB1HFFw6kTOKMIdMZdx7ZmPiI
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from google.colab import drive
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# image_file = get_pkg_data_filename('/content/ch2_xsm_20211111_v1_level1.fits')
from astropy.utils.data import download_file
image_file = fits.open('/content/ch2_xsm_20210917_v1_level2.lc')
image_file2 = fits.open('/content/ch2_xsm_20210923_v1_level2.lc')
image_file3 = fits.open('/content/ch2_xsm_20211108_v1_level2.lc')
img_data=image_file[1].data
img_data2=image_file2[1].data
img_data3=image_file3[1].data
y=img_data['RATE']
x=img_data['TIME']
y1=img_data2['RATE']
x1=img_data2['TIME']
y2=img_data3['RATE']
x2=img_data3['TIME']
def biner(signal):
z=[]
for i in signal:
if i> 800 and i<= 5011:
z.append(1000) #type B
elif i>5011 and i<= 25000:
z.append(5000) #type C
elif i>25000 and i<= 250000:
z.append(25000) #type M
elif i>2500000:
z.append(250000) #type X
elif i<=800: z.append(0) #inconclusive, could be A
return z
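# Editor's note (not in the original notebook): the thresholds above bucket the
# XSM count rate into GOES-like flare classes, e.g.
#   biner([900, 10000, 30000, 500]) -> [1000, 5000, 25000, 0]   # B, C, M, unclassified
# Rates between 250000 and 2500000 currently match no branch and are skipped,
# so z can come out shorter than the input signal.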
z=biner(y)
z1=biner(y1)
z2=biner(y2)
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x, z)
plt.plot(x, y)
plt.show()
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x1, z1)
plt.show()
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x2, z2)
plt.show()
def stable(signal):
timer=0
ts=0
for i in range(len(signal)):
if signal[i]!=0 and timer==0:
ts=i
timer=1
if signal[i]==0 and timer==1:
timer=0
if i-ts<120:
for j in range(ts, i+1):
signal[j]=0
return signal
f= stable(z)
f1=stable(z1)
f2=stable(z2)
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x, f)
plt.plot(x,y)
plt.show()
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x1, f1)
plt.plot(x1,y1)
plt.show()
plt_1 = plt.figure(figsize=(200,7))
plt.plot(x2, f2)
plt.plot(x2,y2)
plt.show()
def extractor( signal, time):
stable_signal= stable(biner(signal))
t_start=[]
i_start=[]
start_flux=[]
end_flux=[]
t_stop=[]
i_stop=[]
peak_count=[]
t_peak=[]
i_peak=[]
i_rise=[]
t_rise=[]
i_decay=[]
t_decay=[]
cat=[]
timer=0
for i in range(len(signal)):
if stable_signal[i] >99 and timer==0:
t_start.append(time[i])
i_start.append(i)
timer=1
if stable_signal[i] <99 and timer==1:
t_stop.append(time[i])
i_stop.append(i)
timer=0
if t_start==[]:
t_start.append(0)
i_start.append(0)
for i in range(len(t_start)):
peak_count_val=0
peak_instance=0
for index in range(i_start[i], i_stop[i]):
if peak_count_val< signal[index]:
peak_count_val= signal[index]
peak_instance= index
peak_count.append( peak_count_val )
t_peak.append( time[peak_instance])
i_peak.append(peak_instance)
bin_max= max(stable_signal[i_start[i]: i_stop[i]])
if bin_max==1000: cat.append('B')
elif bin_max==5000: cat.append('C')
elif bin_max==25000: cat.append('M')
elif bin_max==250000: cat.append('X')
# for i in range(len(t_start)):
# up_thresh= signal[i_peak[i]]/20
# down_thresh= signal[i_peak[i]]/2
# start_flux.append(up_thresh)
# end_flux.append(down_thresh)
for i in range(len(t_start)):
start_t, end_t, start_i, end_i=t_peak[i], t_peak[i], i_peak[i], i_peak[i]
while( signal[start_i]> peak_count[i]/20 and start_i>i_start[i]):
start_i-=1
while( signal[end_i]> peak_count[i]/2 and end_i<i_stop[i]):
end_i+=1
i_rise.append(start_i)
i_decay.append(end_i)
t_rise.append( time[start_i] )
t_decay.append( time[end_i] )
data={
't_start': t_start,
't_stop': t_stop,
'category': cat,
'peak count rate': peak_count,
'peak instance': t_peak,
'rise instance': t_rise,
'decay instance': t_decay,
'i_start': i_start,
'i_stop': i_stop,
'i_peak': i_peak,
'i_rise': i_peak,
'i_decay': i_decay,
}
return data
data= extractor(y, x)
t_start= data['t_start']
t_stop= data['t_stop']
t_peak= data['peak instance']
t_rise= data['rise instance']
t_decay= data['decay instance']
cat= data['category']
len(t_stop)
for i in range(1):
print( t_rise[i]-t_peak[i], t_peak[i]-t_decay[i], t_start[i]-t_peak[i], t_peak[i]-t_stop[i], cat[i])
data['category']
data['peak instance']
aq = y[0]-191.50723
if(aq<0.000001):
print("true")
yj=np.around(y, 3)
aq = y[0]-191.507
if(aq<0.001):
print("true")
type(x) | 20.925764 | 102 | 0.657972 |
4a231c476844b6bcdc5c0dff40754205f7b43483 | 2,305 | py | Python | vega/trainer/modules/conf/loss.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 724 | 2020-06-22T12:05:30.000Z | 2022-03-31T07:10:54.000Z | vega/trainer/modules/conf/loss.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 147 | 2020-06-30T13:34:46.000Z | 2022-03-29T11:30:17.000Z | vega/trainer/modules/conf/loss.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 160 | 2020-06-29T18:27:58.000Z | 2022-03-23T08:42:21.000Z | # -*- coding=utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Default loss configs."""
from vega.common import ConfigSerializable
import vega
class LossConfig(ConfigSerializable):
"""Default Loss Config."""
_class_type = "trainer.loss"
_exclude_keys = ['type']
_update_all_attrs = True
type = 'CrossEntropyLoss'
@classmethod
def from_dict(cls, data, skip_check=True):
"""Restore config from a dictionary or a file."""
cls = super(LossConfig, cls).from_dict(data, skip_check)
if vega.is_ms_backend():
if "params" not in data:
cls.params = {'sparse': True}
elif "sparse" not in data.params:
cls.params.update({'sparse': True})
return cls
@classmethod
def rules(cls):
"""Return rules for checking."""
rules = {"type": {"type": str},
"params": {"type": dict}}
return rules
class LossMappingDict(object):
"""Loss Mapping Dictionary."""
type_mapping_dict = dict(
CrossEntropyLoss=dict(torch='CrossEntropyLoss', tf='CrossEntropyLoss',
ms='SoftmaxCrossEntropyWithLogits'),
MixAuxiliaryLoss=dict(torch='MixAuxiliaryLoss', tf='MixAuxiliaryLoss', ms='MixAuxiliaryLoss'),
L1Loss=dict(torch='L1Loss', tf='absolute_difference', ms="L1Loss"),
MSELoss=dict(torch='MSELoss', tf='mean_squared_error', ms=None),
)
params_mapping_dict = dict(
CrossEntropyLoss=dict(
ignore_index=dict(torch='ignore_index', tf='ignore_index', ms=None),
# is_grad=dict(torch=None, tf=None, ms='is_grad'),
sparse=dict(torch=None, tf=None, ms='sparse'),
),
MixAuxiliaryLoss=dict(
loss_base=dict(torch='loss_base', tf='loss_base', ms='loss_base'),
aux_weight=dict(torch='aux_weight', tf='aux_weight', ms='aux_weight'),
)
)
| 36.015625 | 102 | 0.636009 |
4a231d83cd4de21b0fc11818433bb64ca3a12db4 | 489 | py | Python | blender/arm/logicnode/scene/LN_set_scene_active.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/scene/LN_set_scene_active.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/scene/LN_set_scene_active.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import *
class SetSceneNode(ArmLogicTreeNode):
"""Sets the active scene."""
bl_idname = 'LNSetSceneNode'
bl_label = 'Set Scene Active'
arm_version = 1
def init(self, context):
super(SetSceneNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('NodeSocketShader', 'Scene')
self.add_output('ArmNodeSocketAction', 'Out')
self.add_output('ArmNodeSocketObject', 'Root')
| 30.5625 | 54 | 0.674847 |
4a231e0719f77b0cb5128a896a17d6ee62ffa1d0 | 149 | py | Python | ding/worker/__init__.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | ding/worker/__init__.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | ding/worker/__init__.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z | from .collector import *
from .learner import *
from .replay_buffer import *
from .coordinator import *
from .adapter import *
from .buffer import *
| 21.285714 | 28 | 0.758389 |
4a231e2098419df2281fba0e307580332842ba6b | 8,465 | py | Python | applications/popart/bert/utils/packed_bert_utils.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 260 | 2019-11-18T01:50:00.000Z | 2022-03-28T23:08:53.000Z | applications/popart/bert/utils/packed_bert_utils.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 27 | 2020-01-28T23:07:50.000Z | 2022-02-14T15:37:06.000Z | applications/popart/bert/utils/packed_bert_utils.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 56 | 2019-11-18T02:13:12.000Z | 2022-02-28T14:36:09.000Z | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import popart
import numpy as np
def add_inputs(model):
# make inputs directly accessible to the model
model.inputs = {}
model.labels = {}
config = model.config
# Sequence inputs
# Input mask contains a sequence index for each token in the pack
sequence_shape = [config.micro_batch_size * config.sequence_length]
sequence_info = popart.TensorInfo("UINT32", sequence_shape)
model.inputs["input_ids"] = model.builder.addInputTensor(sequence_info, "input_ids")
model.inputs["input_mask"] = model.builder.addInputTensor(sequence_info, "input_mask")
model.inputs["position_ids"] = model.builder.addInputTensor(sequence_info, "position_ids")
model.inputs["segment_ids"] = model.builder.addInputTensor(sequence_info, "segment_ids")
# MLM token ids and their respective positions.
# The masked_lm_weights contain the index of the sequence in the pack to which a token belongs
mlm_shape = [config.micro_batch_size, config.max_lm_predictions]
mlm_info = popart.TensorInfo("UINT32", mlm_shape)
model.labels["masked_lm_ids"] = model.builder.addInputTensor(mlm_info, "masked_lm_ids")
model.inputs["masked_lm_weights"] = model.builder.addInputTensor(mlm_info, "masked_lm_weights")
# NSP (there are multiple [CLS] tokens per pack)
nsp_shape = [config.micro_batch_size, config.max_sequences_per_pack]
nsp_info = popart.TensorInfo("UINT32", nsp_shape)
model.labels["nsp_labels"] = model.builder.addInputTensor(nsp_info, "nsp_labels")
model.labels["nsp_weights"] = model.builder.addInputTensor(nsp_info, "nsp_weights")
# The shapes for the constructed inputs and labels
# (in order of appearance in the dataset). Required for compatibility with legacy code
input_tensor_shapes = [sequence_shape]*4 + [mlm_shape]*2 + [nsp_shape]*2
input_tensor_names = ["input_ids", "input_mask", "segment_ids", "position_ids"]
input_tensor_names += ["masked_lm_ids", "masked_lm_weights"]
input_tensor_names += ["nsp_labels", "nsp_weights"]
return [(name, shape) for name, shape in zip(input_tensor_names, input_tensor_shapes)]
def logits_graph(model):
# Glue code for compatibility with non-packing version and naming convention
indices = model.inputs["input_ids"]
positions = model.inputs["position_ids"]
segments = model.inputs["segment_ids"]
masks = [model.inputs["input_mask"]]
list_of_logits = model.build_graph(indices, positions, segments, masks)
return list_of_logits
def attention_mask(model, x):
"""
    model.inputs["input_mask"] is used to create a mask which
prevents cross-contamination between sequences in a pack
"""
config = model.config
input_mask = model.inputs["input_mask"]
final_mask = model.builder.customOp(opName="AttentionMask",
opVersion=1,
domain="ai.graphcore",
inputs=[input_mask, x],
attributes={"dataType": model.config.popart_dtype})[0]
final_mask = model.detach(final_mask)
return final_mask
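# Editor's note (not in the original file): with packed sequences the resulting
# mask is block-diagonal per pack. For example an input_mask row like
# [1, 1, 2, 2, 2, 0, ...] (one sequence index per token, 0 for padding) lets a
# token attend only to tokens carrying the same index, so the packed sequences
# never attend to one another even though they share a row of the batch.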
def slice_nsp_tokens(model, pooler_input):
"""
The nsp tokens have been rearranged to the back of the sequence and can
simply be sliced off.
"""
config = model.config
starts = config.sequence_length - config.max_sequences_per_pack
ends = config.sequence_length
pooler_input = model.builder.aiOnnxOpset9.slice([pooler_input], axes=[1], starts=[starts], ends=[ends])
pooler_input = model.builder.reshape_const(model.builder.aiOnnx, [pooler_input],
[config.micro_batch_size, config.max_sequences_per_pack,
config.hidden_size])
return pooler_input
def pretraining_loss_and_accuracy(model, logits):
# MLM and NSP loss and accuracy calculation
# some tensors are shared between the two loss calculations
# Which outputs should be streamed back to host
outputs_to_anchor = {}
config = model.config
# MLM
with model.mlm_scope:
mlm_logits = logits[0]
mlm_predictions = model.builder.aiOnnx.argmax([mlm_logits], axis=-1,
keepdims=0, debugContext=f"MLM/ArgMax")
mlm_labels = model.labels["masked_lm_ids"]
mlm_labels = model.builder.aiOnnx.cast([mlm_labels], "INT32")
mlm_seq_ind = model.inputs["masked_lm_weights"]
mlm_seq_ind = model.builder.reshape_const(model.builder.aiOnnx, [mlm_seq_ind], [config.micro_batch_size, -1])
# MLM loss
# computed on a pertoken basis (original BERT implementation)
mlm_probs = model.builder.aiOnnx.softmax([mlm_logits], axis=-1)
mlm_loss = model.builder.aiGraphcore.nllloss([mlm_probs, mlm_labels], ignoreIndex=0,
reduction=popart.ReductionType.Sum, debugContext=f"MLM/loss")
mlm_loss = model.builder.aiOnnx.cast([mlm_loss], "FLOAT")
outputs_to_anchor[mlm_loss] = popart.AnchorReturnType("SUM")
# MLM accuracy
mlm_accuracy_per_token = model.builder.aiOnnx.equal([mlm_predictions, mlm_labels])
mlm_accuracy_per_token = model.detach(mlm_accuracy_per_token)
mlm_accuracy_per_token = model.builder.aiOnnx.cast([mlm_accuracy_per_token], "FLOAT")
mlm_token_weights = model.builder.aiOnnx.greater([mlm_seq_ind, model.constant_tensor([0], dtype=np.uint32)])
mlm_token_weights = model.builder.aiOnnx.cast([mlm_token_weights], "FLOAT")
mlm_accuracy_per_token = model.builder.aiOnnx.mul([mlm_accuracy_per_token, mlm_token_weights])
mlm_accuracy = model.builder.aiOnnx.reducesum([mlm_accuracy_per_token], keepdims=False)
outputs_to_anchor[mlm_accuracy] = popart.AnchorReturnType("SUM")
# NSP accuracy and loss computed per-pack
with model.nsp_scope:
nsp_logits = logits[1]
nsp_predictions = model.builder.aiOnnx.argmax([nsp_logits], axis=-1,
keepdims=0, debugContext=f"NSP/ArgMax")
nsp_labels = model.builder.aiOnnx.cast([model.labels["nsp_labels"]], "INT32")
nsp_weights = model.builder.aiOnnx.cast([model.labels["nsp_weights"]], "INT32")
nsp_weights_f = model.builder.aiOnnx.cast([nsp_weights], "FLOAT") # 1 or 0 mask
# NSP loss
nsp_probs = model.builder.aiOnnx.softmax([nsp_logits], axis=-1)
nsp_nll_per_token = model.builder.aiGraphcore.nllloss([nsp_probs, model.labels["nsp_labels"]], ignoreIndex=None,
reduction=popart.ReductionType.NoReduction, debugContext=f"NSP/loss")
nsp_nll_per_token = model.builder.aiOnnx.cast([nsp_nll_per_token], "FLOAT")
nsp_loss = model.builder.aiOnnx.mul([nsp_nll_per_token, nsp_weights_f])
nsp_loss = model.builder.aiOnnx.reducesum([nsp_loss], keepdims=False)
outputs_to_anchor[nsp_loss] = popart.AnchorReturnType("SUM")
# NSP accuracy
nsp_accuracy_per_token = model.builder.aiOnnx.equal([nsp_labels, nsp_predictions])
nsp_accuracy_per_token = model.builder.aiOnnx.cast([nsp_accuracy_per_token], "FLOAT")
nsp_accuracy = model.builder.aiOnnx.mul([nsp_accuracy_per_token, nsp_weights_f])
nsp_accuracy = model.builder.aiOnnx.reducesum([nsp_accuracy], keepdims=False)
outputs_to_anchor[nsp_accuracy] = popart.AnchorReturnType("SUM")
# MLM + NSP is final loss
with model.final_loss_scope:
final_loss = model.builder.aiOnnx.add([mlm_loss, nsp_loss], "FinalLoss")
for out in outputs_to_anchor.keys():
model.builder.addOutputTensor(out)
return [mlm_loss, nsp_loss], [mlm_accuracy, nsp_accuracy], final_loss, outputs_to_anchor
| 50.688623 | 131 | 0.691199 |
4a231e56a4b2169f65f4a6a2e33d04beea4563af | 1,909 | py | Python | jupyter_releaser/python.py | jupyterlab-bot/jupyter_releaser | 1ef0ebcc39cb268ef8949e809582bd9a5a244dc3 | [
"BSD-3-Clause"
] | null | null | null | jupyter_releaser/python.py | jupyterlab-bot/jupyter_releaser | 1ef0ebcc39cb268ef8949e809582bd9a5a244dc3 | [
"BSD-3-Clause"
] | 59 | 2021-03-09T10:11:27.000Z | 2021-04-13T09:06:46.000Z | jupyter_releaser/python.py | jupyterlab-bot/jupyter_releaser | 1ef0ebcc39cb268ef8949e809582bd9a5a244dc3 | [
"BSD-3-Clause"
] | 1 | 2021-05-02T16:04:02.000Z | 2021-05-02T16:04:02.000Z | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import os.path as osp
import re
from glob import glob
from pathlib import Path
from tempfile import TemporaryDirectory
from jupyter_releaser import util
PYPROJECT = util.PYPROJECT
SETUP_PY = util.SETUP_PY
def build_dist(dist_dir):
"""Build the python dist files into a dist folder"""
    # Clean the dist folder of existing tarballs and wheels
os.makedirs(dist_dir, exist_ok=True)
dest = Path(dist_dir)
for pkg in glob(f"{dist_dir}/*.gz") + glob(f"{dist_dir}/*.whl"):
os.remove(pkg)
if PYPROJECT.exists():
util.run(f"python -m build --outdir {dist_dir} .")
elif SETUP_PY.exists():
util.run(f"python setup.py sdist --dist-dir {dist_dir}")
util.run(f"python setup.py bdist_wheel --dist-dir {dist_dir}")
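# Editor's note (not in the original file): build_dist("dist") therefore produces
# both an sdist and a wheel - via `python -m build` for pyproject.toml projects,
# or via the two setup.py invocations above for legacy layouts.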
def check_dist(dist_file, test_cmd=""):
"""Check a Python package locally (not as a cli)"""
dist_file = util.normalize_path(dist_file)
util.run(f"twine check {dist_file}")
if not test_cmd:
# Get the package name from the dist file name
name = re.match(r"(\S+)-\d", osp.basename(dist_file)).groups()[0]
name = name.replace("-", "_")
test_cmd = f'python -c "import {name}"'
# Create venvs to install dist file
# run the test command in the venv
with TemporaryDirectory() as td:
env_path = util.normalize_path(osp.abspath(td))
if os.name == "nt": # pragma: no cover
bin_path = f"{env_path}/Scripts/"
else:
bin_path = f"{env_path}/bin"
# Create the virtual env, upgrade pip,
# install, and run test command
util.run(f"python -m venv {env_path}")
util.run(f"{bin_path}/python -m pip install -U pip")
util.run(f"{bin_path}/pip install -q {dist_file}")
util.run(f"{bin_path}/{test_cmd}")
| 33.491228 | 73 | 0.647459 |
4a231f55460cc6ddfb4ad565e8064fae0de63ca8 | 9,859 | py | Python | mapillary_tools/commands/process_and_upload.py | testbalda/mapillary_tools | 07c00bb83815e512e1b85f5957b7d1ac9dfdeb0c | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/commands/process_and_upload.py | testbalda/mapillary_tools | 07c00bb83815e512e1b85f5957b7d1ac9dfdeb0c | [
"BSD-2-Clause"
] | null | null | null | mapillary_tools/commands/process_and_upload.py | testbalda/mapillary_tools | 07c00bb83815e512e1b85f5957b7d1ac9dfdeb0c | [
"BSD-2-Clause"
] | null | null | null | import inspect
from mapillary_tools.process_user_properties import process_user_properties
from mapillary_tools.process_import_meta_properties import process_import_meta_properties
from mapillary_tools.process_geotag_properties import process_geotag_properties
from mapillary_tools.process_sequence_properties import process_sequence_properties
from mapillary_tools.process_upload_params import process_upload_params
from mapillary_tools.insert_MAPJson import insert_MAPJson
from mapillary_tools.upload import upload
# post_process is called in run() below; module path assumed from the package layout
from mapillary_tools.post_process import post_process
class Command:
name = 'process_and_upload'
help = "Batch tool : Process images and upload to Mapillary."
def add_basic_arguments(self, parser):
parser.add_argument(
'--rerun', help='rerun the processing', action='store_true', required=False)
# user properties
# user name for the import
parser.add_argument("--user_name", help="user name", required=True)
# organization level parameters
parser.add_argument(
'--organization_username', help="Specify organization user name", default=None, required=False)
parser.add_argument(
'--organization_key', help="Specify organization key", default=None, required=False)
parser.add_argument('--private',
help="Specify whether the import is private", action='store_true', default=False, required=False)
parser.add_argument(
'--manual_done', help='Manually finalize the upload', action='store_true', default=False, required=False)
parser.add_argument(
'--skip_subfolders', help='Skip all subfolders and import only the images in the given directory path.', action='store_true', default=False, required=False)
def add_advanced_arguments(self, parser):
# master upload
parser.add_argument('--master_upload', help='Process images with a master key, note: only used by Mapillary employees',
action='store_true', default=False, required=False)
#import meta
parser.add_argument(
"--device_make", help="Specify device manufacturer. Note this input has precedence over the input read from the import source file.", default=None, required=False)
parser.add_argument(
"--device_model", help="Specify device model. Note this input has precedence over the input read from the import source file.", default=None, required=False)
parser.add_argument(
'--add_file_name', help="Add original file name to EXIF. Note this input has precedence over the input read from the import source file.", action='store_true', required=False)
parser.add_argument(
'--add_import_date', help="Add import date.", action='store_true', required=False)
parser.add_argument('--orientation', help='Specify the image orientation in degrees. Note this might result in image rotation. Note this input has precedence over the input read from the import source file.',
choices=[0, 90, 180, 270], type=int, default=None, required=False)
parser.add_argument(
"--GPS_accuracy", help="GPS accuracy in meters. Note this input has precedence over the input read from the import source file.", default=None, required=False)
parser.add_argument(
"--camera_uuid", help="Custom string used to differentiate different captures taken with the same camera make and model.", default=None, required=False)
# geotagging
parser.add_argument('--geotag_source', help='Provide the source of date/time and gps information needed for geotagging.', action='store',
choices=['exif', 'gpx', 'gopro_video', 'nmea'], default="exif", required=False)
parser.add_argument(
'--geotag_source_path', help='Provide the path to the file source of date/time and gps information needed for geotagging.', action='store',
default=None, required=False)
parser.add_argument(
'--local_time', help='Assume image timestamps are in your local time', action='store_true', default=False, required=False)
parser.add_argument('--sub_second_interval',
help='Sub second time between shots. Used to set image times with sub-second precision',
type=float, default=0.0, required=False)
parser.add_argument('--offset_time', default=0., type=float,
help='time offset between the camera and the gps device, in seconds.', required=False)
parser.add_argument('--offset_angle', default=0., type=float,
help='offset camera angle (90 for right facing, 180 for rear facing, -90 for left facing)', required=False)
parser.add_argument("--use_gps_start_time",
help="Use GPS trace starting time in case of derivating timestamp from filename.", action="store_true", default=False, required=False)
# sequence
parser.add_argument('--cutoff_distance', default=600., type=float,
help='maximum gps distance in meters within a sequence', required=False)
parser.add_argument('--cutoff_time', default=60., type=float,
help='maximum time interval in seconds within a sequence', required=False)
parser.add_argument('--interpolate_directions',
help='perform interploation of directions', action='store_true', required=False)
parser.add_argument('--flag_duplicates',
help='flag duplicates', action='store_true', required=False)
parser.add_argument('--duplicate_distance',
help='max distance for two images to be considered duplicates in meters', type=float, default=0.1, required=False)
parser.add_argument(
'--duplicate_angle', help='max angle for two images to be considered duplicates in degrees', type=float, default=5, required=False)
# EXIF insert
parser.add_argument('--skip_EXIF_insert', help='Skip inserting the extracted data into image EXIF.',
action='store_true', default=False, required=False)
parser.add_argument('--keep_original', help='Do not overwrite original images, instead save the processed images in a new directory by adding suffix "_processed" to the import_path.',
action='store_true', default=False, required=False)
parser.add_argument(
'--number_threads', help='Specify the number of upload threads.', type=int, default=None, required=False)
parser.add_argument(
'--max_attempts', help='Specify the maximum number of attempts to upload.', type=int, default=None, required=False)
# post process
parser.add_argument('--summarize', help='Summarize import for given import path.',
action='store_true', default=False, required=False)
parser.add_argument('--move_images', help='Move images corresponding to sequence uuid, duplicate flag and upload status.',
action='store_true', default=False, required=False)
parser.add_argument('--save_as_json', help='Save summary or file status list in a json.',
action='store_true', default=False, required=False)
parser.add_argument('--list_file_status', help='List file status for given import path.',
action='store_true', default=False, required=False)
parser.add_argument('--push_images', help='Push images uploaded in given import path.',
action='store_true', default=False, required=False)
parser.add_argument(
'--split_import_path', help='If splitting the import path into duplicates, sequences, success and failed uploads, provide a path for the splits.', default=None, required=False)
# add custom meta data in a form of a string consisting of a triplet
# "name,type,value"
parser.add_argument('--custom_meta_data', help='Add custom meta data to all images. Required format of input is a string, consisting of the meta data name, type and value, separated by a comma for each entry, where entries are separated by semicolon. Supported types are long, double, string, boolean, date. Example for two meta data entries "random_name1,double,12.34;random_name2,long,1234"',
default=None, required=False)
def run(self, args):
vars_args = vars(args)
process_user_properties(**({k: v for k, v in vars_args.iteritems()
if k in inspect.getargspec(process_user_properties).args}))
process_import_meta_properties(
**({k: v for k, v in vars_args.iteritems() if k in inspect.getargspec(process_import_meta_properties).args}))
process_geotag_properties(
**({k: v for k, v in vars_args.iteritems() if k in inspect.getargspec(process_geotag_properties).args}))
process_sequence_properties(
**({k: v for k, v in vars_args.iteritems() if k in inspect.getargspec(process_sequence_properties).args}))
process_upload_params(**({k: v for k, v in vars_args.iteritems()
if k in inspect.getargspec(process_upload_params).args}))
insert_MAPJson(**({k: v for k, v in vars_args.iteritems()
if k in inspect.getargspec(insert_MAPJson).args}))
print("Process done.")
upload(**({k: v for k, v in vars_args.iteritems()
if k in inspect.getargspec(upload).args}))
post_process(**({k: v for k, v in vars_args.iteritems()
if k in inspect.getargspec(post_process).args}))
| 70.421429 | 402 | 0.666396 |
4a231fdf8251014862b5592ff8df939633c425a1 | 8,844 | py | Python | src/gluonts/distribution/distribution.py | strawberrypie/gluon-ts | 1d27423478f1dc4621f81c4659d8ba78f88ee89b | [
"Apache-2.0"
] | 1 | 2020-01-19T13:27:51.000Z | 2020-01-19T13:27:51.000Z | src/gluonts/distribution/distribution.py | strawberrypie/gluon-ts | 1d27423478f1dc4621f81c4659d8ba78f88ee89b | [
"Apache-2.0"
] | null | null | null | src/gluonts/distribution/distribution.py | strawberrypie/gluon-ts | 1d27423478f1dc4621f81c4659d8ba78f88ee89b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional, Tuple
# Third-party imports
import mxnet as mx
from mxnet import autograd
import numpy as np
# First-party imports
from gluonts.model.common import Tensor
def nans_like(x: Tensor) -> Tensor:
return x.zeros_like() / 0.0
def softplus(F, x: Tensor) -> Tensor:
return F.Activation(x, act_type="softrelu")
def getF(var: Tensor):
if isinstance(var, mx.nd.NDArray):
return mx.nd
elif isinstance(var, mx.sym.Symbol):
return mx.sym
else:
raise RuntimeError("var must be instance of NDArray or Symbol in getF")
class Distribution:
r"""
A class representing probability distributions.
"""
arg_names: Tuple
is_reparameterizable = False
def log_prob(self, x: Tensor) -> Tensor:
r"""
Compute the log-density of the distribution at `x`.
Parameters
----------
x
Tensor of shape `(*batch_shape, *event_shape)`.
Returns
-------
Tensor
Tensor of shape `batch_shape` containing the log-density of the
distribution for each event in `x`.
"""
raise NotImplementedError()
def crps(self, x: Tensor) -> Tensor:
r"""
        Compute the *continuous ranked probability score* (CRPS) of `x` according
to the distribution.
Parameters
----------
x
Tensor of shape `(*batch_shape, *event_shape)`.
Returns
-------
Tensor
Tensor of shape `batch_shape` containing the CRPS score,
according to the distribution, for each event in `x`.
"""
raise NotImplementedError()
def loss(self, x: Tensor) -> Tensor:
r"""
Compute the loss at `x` according to the distribution.
By default, this method returns the negative of `log_prob`. For some
distributions, however, the log-density is not easily computable
and therefore other loss functions are computed.
Parameters
----------
x
Tensor of shape `(*batch_shape, *event_shape)`.
Returns
-------
Tensor
Tensor of shape `batch_shape` containing the value of the loss
for each event in `x`.
"""
return -self.log_prob(x)
def prob(self, x: Tensor) -> Tensor:
r"""
Compute the density of the distribution at `x`.
Parameters
----------
x
Tensor of shape `(*batch_shape, *event_shape)`.
Returns
-------
Tensor
Tensor of shape `batch_shape` containing the density of the
distribution for each event in `x`.
"""
return self.log_prob(x).exp()
@property
def batch_shape(self) -> Tuple:
r"""
Layout of the set of events contemplated by the distribution.
Invoking `sample()` from a distribution yields a tensor of shape
        `batch_shape + event_shape`, and computing `log_prob` (or, more
        generally, `loss`) on such a sample yields a tensor of shape
        `batch_shape`.
This property is available in general only in mx.ndarray mode,
when the shape of the distribution arguments can be accessed.
"""
raise NotImplementedError()
@property
def event_shape(self) -> Tuple:
r"""
Shape of each individual event contemplated by the distribution.
For example, distributions over scalars have `event_shape = ()`,
over vectors have `event_shape = (d, )` where `d` is the length
of the vectors, over matrices have `event_shape = (d1, d2)`, and
so on.
Invoking `sample()` from a distribution yields a tensor of shape
`batch_shape + event_shape`.
This property is available in general only in mx.ndarray mode,
when the shape of the distribution arguments can be accessed.
"""
raise NotImplementedError()
@property
def event_dim(self) -> int:
r"""
Number of event dimensions, i.e., length of the `event_shape` tuple.
This is `0` for distributions over scalars, `1` over vectors,
`2` over matrices, and so on.
"""
raise NotImplementedError()
@property
def batch_dim(self) -> int:
r"""
Number of batch dimensions, i.e., length of the `batch_shape` tuple.
"""
return len(self.batch_shape)
@property
def all_dim(self) -> int:
r"""
Number of overall dimensions.
"""
return self.batch_dim + self.event_dim
def sample(
self, num_samples: Optional[int] = None, dtype=np.float32
) -> Tensor:
r"""
Draw samples from the distribution.
If num_samples is given the first dimension of the output will be
num_samples.
Parameters
----------
num_samples
            Number of samples to be drawn.
dtype
Data-type of the samples.
Returns
-------
Tensor
A tensor containing samples. This has shape
            `(*batch_shape, *event_shape)` if `num_samples = None`
            and `(num_samples, *batch_shape, *event_shape)` otherwise.
"""
with autograd.pause():
var = self.sample_rep(num_samples=num_samples, dtype=dtype)
F = getF(var)
return F.BlockGrad(var)
def sample_rep(
self, num_samples: Optional[int] = None, dtype=np.float32
) -> Tensor:
raise NotImplementedError()
@property
def args(self) -> List:
raise NotImplementedError()
@property
def mean(self) -> Tensor:
r"""
Tensor containing the mean of the distribution.
"""
raise NotImplementedError()
@property
def stddev(self) -> Tensor:
r"""
Tensor containing the standard deviation of the distribution.
"""
raise NotImplementedError()
@property
def variance(self) -> Tensor:
r"""
Tensor containing the variance of the distribution.
"""
return self.stddev.square()
def cdf(self, x: Tensor) -> Tensor:
r"""
Returns the value of the cumulative distribution function evaluated at x
"""
raise NotImplementedError()
def quantile(self, level: Tensor) -> Tensor:
r"""
Calculates quantiles for the given levels.
Parameters
----------
level
Level values to use for computing the quantiles.
`level` should be a 1d tensor of level values between 0 and 1.
Returns
-------
quantiles
Quantile values corresponding to the levels passed.
The return shape is
(num_levels, ...DISTRIBUTION_SHAPE...),
where DISTRIBUTION_SHAPE is the shape of the underlying distribution.
"""
raise NotImplementedError()
def slice_axis(
self, axis: int, begin: int, end: Optional[int]
) -> "Distribution":
"""
Construct a new distribution by slicing all constructor arguments
as specified by the provided bounds. Relies on ``mx.nd.slice_axis``.
"""
sliced_distr = self.__class__(
*[arg.slice_axis(axis, begin, end) for arg in self.args]
)
assert isinstance(sliced_distr, type(self))
return sliced_distr
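# Illustrative sketch (not part of gluonts): a concrete subclass only needs to
# provide the shape properties, `args`, `log_prob` and a (reparameterisable)
# sampler; `sample`, `loss` and `slice_axis` above then work unchanged. The
# toy Gaussian below is invented for illustration and is not the Gaussian
# distribution shipped with gluonts.
class _SketchGaussian(Distribution):
    is_reparameterizable = True

    def __init__(self, mu: Tensor, sigma: Tensor) -> None:
        self.mu = mu
        self.sigma = sigma

    @property
    def args(self) -> List:
        return [self.mu, self.sigma]

    @property
    def batch_shape(self) -> Tuple:
        return self.mu.shape

    @property
    def event_shape(self) -> Tuple:
        return ()

    @property
    def event_dim(self) -> int:
        return 0

    def log_prob(self, x: Tensor) -> Tensor:
        F = getF(x)
        # Gaussian log-density: -0.5 * (log(2*pi) + 2*log(sigma) + z**2)
        return -0.5 * (
            np.log(2.0 * np.pi)
            + 2.0 * F.log(self.sigma)
            + F.square((x - self.mu) / self.sigma)
        )

    def sample_rep(
        self, num_samples: Optional[int] = None, dtype=np.float32
    ) -> Tensor:
        def one_sample(mu: Tensor, sigma: Tensor) -> Tensor:
            F = getF(mu)
            eps = F.sample_normal(mu=F.zeros_like(mu), sigma=F.ones_like(mu))
            return mu + sigma * eps

        # _sample_multiple (defined below) tiles mu/sigma along a new leading
        # axis of length num_samples before drawing.
        return _sample_multiple(
            one_sample, self.mu, self.sigma, num_samples=num_samples
        )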
def _expand_param(p: Tensor, num_samples: Optional[int] = None) -> Tensor:
"""
Expand parameters by num_samples along the first dimension.
"""
if num_samples is None:
return p
return p.expand_dims(axis=0).repeat(axis=0, repeats=num_samples)
def _sample_multiple(
sample_func, *args, num_samples: Optional[int] = None, **kwargs
) -> Tensor:
"""
    Sample from sample_func by passing args and kwargs that have been
    expanded along a new leading dimension of size num_samples.
"""
args_expanded = [_expand_param(a, num_samples) for a in args]
kwargs_expanded = {
k: _expand_param(v, num_samples) for k, v in kwargs.items()
}
samples = sample_func(*args_expanded, **kwargs_expanded)
return samples
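# Illustrative sketch (not part of gluonts): _expand_param / _sample_multiple
# simply tile the distribution parameters along a new leading axis of length
# num_samples, so a sampler written for `batch_shape` returns
# `(num_samples, *batch_shape)` without any extra reshaping. The function
# below is invented for illustration only.
def _sample_multiple_sketch() -> Tensor:
    mu = mx.nd.array([0.0, 1.0, 2.0])      # batch_shape = (3,)
    sigma = mx.nd.array([1.0, 1.0, 1.0])
    samples = _sample_multiple(
        mx.nd.sample_normal, mu=mu, sigma=sigma, num_samples=5
    )
    return samples                         # shape (5, 3)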
| 29.092105 | 81 | 0.604251 |
4a23206136e1051895444fde51bd589585c365f6 | 2,153 | py | Python | nicos_demo/mantid/setups/pixels.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_demo/mantid/setups/pixels.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_demo/mantid/setups/pixels.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Example Sans2D Pixel Detector Setup with Instrument View'
group = 'basic'
sysconfig = dict(
instrument = 'sans2d',
)
devices = dict(
sans2d = device('nicos_demo.mantid.devices.instrument.ViewableInstrument',
description = 'instrument object',
responsible = 'R. Esponsible <[email protected]>',
instrument = 'sans2d',
website = 'http://www.nicos-controls.org',
operators = ['ISIS developer team'],
facility = 'ISIS demo instruments',
idf = 'SANS2D_Definition.xml'
),
sample = device('nicos_mlz.sans1.devices.sans1_sample.Sans1Sample',
description = 'sample object',
),
mot_z = device('nicos.devices.generic.VirtualMotor',
description = 'front detector position in the tube',
abslimits = (19.281, 23.281),
curvalue = 23.281,
precision = 3,
speed = 1,
unit = 'm',
fmtstr = '%.3f',
),
mot_x = device('nicos.devices.generic.VirtualMotor',
description = 'horizontal offset of detector',
abslimits = (1.0, 5.0),
speed = 0.5,
unit = 'm',
curvalue = 1.1,
),
mot_omega = device('nicos.devices.generic.VirtualMotor',
description = 'tilt of detector',
abslimits = (-40, 40),
speed = 1.5,
unit = 'deg',
curvalue = 0,
fmtstr = '%.1f',
),
mantid_move_det = device('nicos_demo.mantid.devices.devices.MantidTranslationDevice',
args = {'RelativePosition': False,
'ComponentName': 'front-detector'},
x = 'mot_x',
z = 'mot_z',
lowlevel = True,
),
mantid_rot_det = device('nicos_demo.mantid.devices.devices.MantidRotationDevice',
args = {'RelativeRotation': False,
'ComponentName': 'front-detector'},
y = 1,
angle = 'mot_omega',
lowlevel = True,
),
)
startupcode = '''
printinfo("============================================================")
printinfo("Welcome to the Sans 2D Instrument View demo setup.")
printinfo("============================================================")
'''
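# Illustrative note (not part of the original setup file): once this setup is
# loaded, the virtual axes above behave like any other NICOS moveable, so the
# front detector can be repositioned from a session with the usual user
# commands, for example (command names assumed from standard NICOS usage):
#
#   maw(mot_z, 20.0)       # drive the detector to 20 m inside the tube
#   maw(mot_omega, 5.0)    # tilt the detector by 5 degrees
#   read(mot_x)            # report the current horizontal offset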
| 32.134328 | 89 | 0.553646 |
4a232146daaff1e6c2dae009900ce361f4e24083 | 4,276 | py | Python | omnifit/fitter/functions.py | astrobot/omnifit | 7cc9e499fd149d6d3a3a15761c5380778a3f4f42 | [
"BSD-3-Clause"
] | 6 | 2015-08-25T16:40:30.000Z | 2021-05-13T08:25:07.000Z | omnifit/fitter/functions.py | astrobot/omnifit | 7cc9e499fd149d6d3a3a15761c5380778a3f4f42 | [
"BSD-3-Clause"
] | 22 | 2015-08-27T15:19:50.000Z | 2022-01-12T18:50:31.000Z | omnifit/fitter/functions.py | astrobot/omnifit | 7cc9e499fd149d6d3a3a15761c5380778a3f4f42 | [
"BSD-3-Clause"
] | 4 | 2015-12-31T19:24:48.000Z | 2019-06-18T07:28:19.000Z | import numpy as np
from astropy import convolution
def muldata(data,mul):
"""
muldata(data,mul)
Multiplies data with a given multiplier, and returns the result.
Parameters
----------
data : numpy.ndarray
The data to multiply
mul : float
The multiplier to multiply the data with
Returns
-------
The data multiplied by the multiplier.
"""
return mul*data
def flipped_egh(x,par,kernel=None):
"""
flipped_egh(x,par,kernel=None)
A flipped EGH (exponential-gaussian hybrid) function. A type of
lopsided Gaussian function.
This has been adapted from a normal EGH, which is presented in
Lan & Jorgenson, Journal of Chromatography A, 915 (2001) 1-13
Parameters
----------
x : numpy.ndarray
The x axis data of function.
par : lmfit.parameter.Parameters
The lmfit Parameters instance, which should contain the following:
* H - the magnitude of the peak maximum
* xR - the "retention time" of the precursor Gaussian
* w - the standard deviation of the precursor Gaussian
* tau - the time constant of precursor exponential
kernel : Nonetype, numpy.ndarray or astropy.convolution.Kernel
If set, the result will be convolved using this kernel.
Returns
-------
The function calculated over the range in x, and convolved with the
kernel if one was given.
"""
H=par['H'].value
xR=par['xR'].value
w=par['w'].value
tau=par['tau'].value
expFactor = np.exp((-1.0*(xR-x)**2.0)/(2.0*w*w+tau*(xR-x)))
out_y = np.where(2.0*w*w+tau*(xR-x)>0,H*expFactor,0.0)
if not(np.any(kernel)):
return out_y
else:
return convolution.convolve(out_y,kernel)
def gaussian(x,par,kernel=None):
"""
gaussian(x,par,kernel=None)
  A 1D Gaussian function, structured in a way that it can be given an
lmfit Parameters instance as input.
Parameters
----------
x : numpy.ndarray
The x axis data of function.
par : lmfit.parameter.Parameters
The lmfit Parameters instance, which should contain the following:
* peak - the peak height of the Gaussian
* fwhm - the full width at half maximum of the Gaussian
* pos - the peak position of the Gaussian
kernel : Nonetype, numpy.ndarray or astropy.convolution.Kernel
If set, the result will be convolved using this kernel.
Returns
-------
The function calculated over the range in x, and convolved with the
kernel if one was given.
"""
peak=par['peak'].value
fwhm=par['fwhm'].value
pos=par['pos'].value
out_y=peak*np.exp(-2.35*(x-pos)**2./fwhm**2.)
if not(np.any(kernel)):
return out_y
else:
return convolution.convolve(out_y,kernel)
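# Illustrative usage sketch (not part of omnifit): each fit function in this
# module takes an lmfit Parameters instance, so evaluating the Gaussian on its
# own only requires the three named parameters. The helper and the numbers
# below are invented for illustration.
def _gaussian_usage_sketch():
  import lmfit
  x = np.linspace(-5.0, 5.0, 201)
  par = lmfit.Parameters()
  par.add('peak', value=1.0)
  par.add('fwhm', value=2.0)
  par.add('pos', value=0.0)
  return gaussian(x, par)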
def cde_lorentzian(x,par,kernel=None):
"""
cde_lorentzian(x,par,kernel=None)
A CDE-corrected Lorentzian function, sometimes used for representing
  astrophysical ices. For more information about this function and at
least one application for it, see:
K. Pontoppidan et al., Astronomy & Astrophysics, 408 (2003) 981-1007
Parameters
----------
x : numpy.ndarray
The x axis data of function.
par : lmfit.parameter.Parameters
The lmfit Parameters instance, which should contain the following:
* lor1 - the first Lorentzian parameter, describing the dielectric
function at frequencies low relative to the peak described by
this function.
* lor2 - the second Lorentzian parameter, describing the plasma
frequency.
* lor3 - the third Lorentzian parameter, related to the mass and
imagined spring constant of the molecule which it describes.
* peak - the peak height of the function
* pos - the peak position of the function
kernel : Nonetype, numpy.ndarray or astropy.convolution.Kernel
If set, the result will be convolved using this kernel.
Returns
-------
The function calculated over the range in x, and convolved with the
kernel if one was given.
"""
lor1=par['lor1'].value
lor2=par['lor2'].value
lor3=par['lor3'].value
peak=par['peak'].value
pos=par['pos'].value
lorentz_oscillator=lor1+lor2**2./(pos**2.-x**2.-lor3*x*1.j)
out_y=peak*x*np.imag(2.*lorentz_oscillator*np.log10(lorentz_oscillator)/(lorentz_oscillator-1.))
if not(np.any(kernel)):
return out_y
else:
return convolution.convolve(out_y,kernel)
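# Illustrative sketch (not part of omnifit): every profile above also accepts
# an optional astropy convolution kernel, which is applied to the model before
# it is returned. The helper and parameter values below are invented for
# illustration.
def _cde_lorentzian_kernel_sketch():
  import lmfit
  x = np.linspace(2000.0, 2200.0, 401)
  par = lmfit.Parameters()
  par.add('lor1', value=1.5)
  par.add('lor2', value=50.0)
  par.add('lor3', value=10.0)
  par.add('peak', value=1.0)
  par.add('pos', value=2100.0)
  kernel = convolution.Gaussian1DKernel(3)  # stddev of ~3 samples
  return cde_lorentzian(x, par, kernel=kernel)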
| 31.441176 | 98 | 0.693405 |
4a23214cb6ef31dcabd174b1e2fb16d5d23f0b02 | 106,495 | py | Python | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-alias-id/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of alias ID.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__alias_id")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__alias_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-alias-id",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_alias_id(self):
"""
Getter method for alias_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/alias_id (oc-isis-types:system-id)
YANG Description: List of alias ID(s).
"""
return self.__alias_id
def _set_alias_id(self, v, load=False):
"""
Setter method for alias_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/alias_id (oc-isis-types:system-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_alias_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alias_id() directly.
YANG Description: List of alias ID(s).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """alias_id must be of a type compatible with oc-isis-types:system-id""",
"defined-type": "oc-isis-types:system-id",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}'}), is_leaf=True, yang_name="alias-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:system-id', is_config=False)""",
}
)
self.__alias_id = t
if hasattr(self, "_set"):
self._set()
def _unset_alias_id(self):
self.__alias_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
type = __builtin__.property(_get_type)
alias_id = __builtin__.property(_get_alias_id)
_pyangbind_elements = OrderedDict([("type", type), ("alias_id", alias_id)])
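# Illustrative sketch (not part of napalm-yang): the generated class exposes
# the two leaves as read-only properties, and backends populate them through
# the private setters. The helper and the system-id value below are invented
# for illustration.
def _alias_id_state_sketch():
    s = state()
    s._set_alias_id("1921.6800.1001")  # must match xxxx.xxxx.xxxx hex groups
    return s.alias_id, s.type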
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-alias-id/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of alias ID.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__alias_id")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
self.__alias_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-alias-id",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
def _get_alias_id(self):
"""
Getter method for alias_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/alias_id (oc-isis-types:system-id)
YANG Description: List of alias ID(s).
"""
return self.__alias_id
def _set_alias_id(self, v, load=False):
"""
Setter method for alias_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_alias_id/state/alias_id (oc-isis-types:system-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_alias_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alias_id() directly.
YANG Description: List of alias ID(s).
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """alias_id must be of a type compatible with oc-isis-types:system-id""",
"defined-type": "oc-isis-types:system-id",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}'}), is_leaf=True, yang_name="alias-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-isis-types:system-id', is_config=False)""",
}
)
self.__alias_id = t
if hasattr(self, "_set"):
self._set()
def _unset_alias_id(self):
self.__alias_id = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}\\.[0-9A-Fa-f]{4}"
},
),
is_leaf=True,
yang_name="alias-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-isis-types:system-id",
is_config=False,
)
type = __builtin__.property(_get_type)
alias_id = __builtin__.property(_get_alias_id)
_pyangbind_elements = OrderedDict([("type", type), ("alias_id", alias_id)])
| 59.997183 | 7,352 | 0.489676 |
4a23216edbaeb4d6a154a269a4a9a8486f2944a6 | 2,108 | py | Python | flasky_db/env.py | simonZhou-x/flasky-simon | b1ff326a9d12bccd838348aa4edf4abc311a60c7 | [
"MIT"
] | null | null | null | flasky_db/env.py | simonZhou-x/flasky-simon | b1ff326a9d12bccd838348aa4edf4abc311a60c7 | [
"MIT"
] | null | null | null | flasky_db/env.py | simonZhou-x/flasky-simon | b1ff326a9d12bccd838348aa4edf4abc311a60c7 | [
"MIT"
] | null | null | null | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from manage import db
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 28.486486 | 69 | 0.726281 |
4a2321a87b984a344edbeca64234cd4aaf5cefa6 | 15,162 | py | Python | dodola/core.py | brews/dodola | f209631654f9c5658bbf8a824eca7e3742c8aafe | [
"Apache-2.0"
] | 1 | 2020-11-20T21:25:47.000Z | 2020-11-20T21:25:47.000Z | dodola/core.py | brews/dodola | f209631654f9c5658bbf8a824eca7e3742c8aafe | [
"Apache-2.0"
] | null | null | null | dodola/core.py | brews/dodola | f209631654f9c5658bbf8a824eca7e3742c8aafe | [
"Apache-2.0"
] | null | null | null | """Core logic for bias-correction and downscaling
Math stuff and business logic goes here. This is the "business logic".
"""
import numpy as np
import logging
from skdownscale.spatial_models import SpatialDisaggregator
import xarray as xr
from xclim import sdba, set_options
from xclim.sdba.utils import equally_spaced_nodes
from xclim.core.calendar import convert_calendar
import xesmf as xe
logger = logging.getLogger(__name__)
# Break this down into a submodule(s) if needed.
# Assume data input here is generally clean and valid.
def train_quantiledeltamapping(
reference, historical, variable, kind, quantiles_n=100, window_n=31
):
"""Train quantile delta mapping
Parameters
----------
reference : xr.Dataset
Dataset to use as model reference.
historical : xr.Dataset
Dataset to use as historical simulation.
variable : str
Name of target variable to extract from `historical` and `reference`.
kind : {"+", "*"}
Kind of variable. Used for QDM scaling.
quantiles_n : int, optional
Number of quantiles for QDM.
window_n : int, optional
Centered window size for day-of-year grouping.
Returns
-------
xclim.sdba.adjustment.QuantileDeltaMapping
"""
qdm = sdba.adjustment.QuantileDeltaMapping(
kind=str(kind),
group=sdba.Grouper("time.dayofyear", window=int(window_n)),
nquantiles=equally_spaced_nodes(int(quantiles_n), eps=None),
)
qdm.train(ref=reference[variable], hist=historical[variable])
return qdm
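# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The synthetic ``ref``/``hist`` datasets below are assumptions for demonstration only;
# real callers would pass reference and historical climate data with a daily time axis.
def _example_train_qdm_sketch():
    time = xr.cftime_range("2000-01-01", "2009-12-31", freq="D", calendar="noleap")
    rng = np.random.default_rng(0)
    ref = xr.Dataset({"tas": ("time", 15 + 10 * rng.random(len(time)))}, coords={"time": time})
    hist = xr.Dataset({"tas": ("time", 17 + 10 * rng.random(len(time)))}, coords={"time": time})
    # A small quantiles_n keeps the sketch cheap; production runs use the default of 100.
    return train_quantiledeltamapping(ref, hist, variable="tas", kind="+", quantiles_n=20)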
def adjust_quantiledeltamapping_year(
simulation, qdm, year, variable, halfyearwindow_n=10, include_quantiles=False
):
"""Apply QDM to adjust a year within a simulation.
Parameters
----------
simulation : xr.Dataset
Daily simulation data to be adjusted. Must have sufficient observations
around `year` to adjust.
qdm : xr.Dataset or sdba.adjustment.QuantileDeltaMapping
Trained ``xclim.sdba.adjustment.QuantileDeltaMapping``, or
Dataset representation that will be instantiate
``xclim.sdba.adjustment.QuantileDeltaMapping``.
year : int
Target year to adjust, with rolling years and day grouping.
variable : str
Target variable in `simulation` to adjust. Adjusted output will share the
same name.
halfyearwindow_n : int, optional
Half-length of the annual rolling window to extract along either
side of `year`.
include_quantiles : bool, optional
Whether or not to output quantiles (sim_q) as a coordinate on
the bias corrected data variable in output.
Returns
-------
out : xr.Dataset
QDM-adjusted values from `simulation`. May be a lazy-evaluated future, not
yet computed.
"""
year = int(year)
variable = str(variable)
halfyearwindow_n = int(halfyearwindow_n)
if isinstance(qdm, xr.Dataset):
qdm = sdba.adjustment.QuantileDeltaMapping.from_dataset(qdm)
    # Slice out the +/- halfyearwindow_n years around the target year, plus 15
    # days on each end to support the 31-day day-of-year rolling window.
timeslice = slice(
f"{year - halfyearwindow_n - 1}-12-17", f"{year + halfyearwindow_n + 1}-01-15"
)
simulation = simulation[variable].sel(
time=timeslice
) # TODO: Need a check to ensure we have all the data in this slice!
if include_quantiles:
# include quantile information in output
with set_options(sdba_extra_output=True):
out = qdm.adjust(simulation, interp="nearest").sel(time=str(year))
# make quantiles a coordinate of bias corrected output variable
out = out["scen"].assign_coords(sim_q=out.sim_q)
else:
out = qdm.adjust(simulation, interp="nearest").sel(time=str(year))
return out.to_dataset(name=variable)
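# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds on the hypothetical ``_example_train_qdm_sketch()`` above; the simulation span
# must cover the target year +/- ``halfyearwindow_n`` years (2005 +/- 3 here).
def _example_adjust_qdm_year_sketch():
    qdm = _example_train_qdm_sketch()
    time = xr.cftime_range("2000-01-01", "2009-12-31", freq="D", calendar="noleap")
    rng = np.random.default_rng(1)
    sim = xr.Dataset({"tas": ("time", 18 + 10 * rng.random(len(time)))}, coords={"time": time})
    return adjust_quantiledeltamapping_year(sim, qdm, year=2005, variable="tas", halfyearwindow_n=3)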
def train_analogdownscaling(
coarse_reference, fine_reference, variable, kind, quantiles_n=620, window_n=31
):
"""Train analog-inspired quantile-preserving downscaling
Parameters
----------
coarse_reference : xr.Dataset
Dataset to use as resampled (to fine resolution) coarse reference.
fine_reference : xr.Dataset
Dataset to use as fine-resolution reference.
variable : str
Name of target variable to extract from `coarse_reference` and `fine_reference`.
kind : {"+", "*"}
Kind of variable. Used for creating AIQPD adjustment factors.
quantiles_n : int, optional
Number of quantiles for AIQPD.
window_n : int, optional
Centered window size for day-of-year grouping.
Returns
-------
xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling
"""
# AIQPD method requires that the number of quantiles equals
# the number of days in each day group
# e.g. 20 years of data and a window of 31 = 620 quantiles
# check that lengths of input data are the same, then only check years for one
if len(coarse_reference.time) != len(fine_reference.time):
raise ValueError("coarse and fine reference data inputs have different lengths")
# check number of years in input data (subtract 2 for the +/- 15 days on each end)
num_years = len(np.unique(fine_reference.time.dt.year)) - 2
if (num_years * int(window_n)) != quantiles_n:
raise ValueError(
"number of quantiles {} must equal # of years {} * window length {}, day groups must {} days".format(
quantiles_n, num_years, int(window_n), quantiles_n
)
)
aiqpd = sdba.adjustment.AnalogQuantilePreservingDownscaling(
kind=str(kind),
group=sdba.Grouper("time.dayofyear", window=int(window_n)),
nquantiles=quantiles_n,
)
aiqpd.train(coarse_reference[variable], fine_reference[variable])
return aiqpd
def adjust_analogdownscaling(simulation, aiqpd, variable):
"""Apply AIQPD to downscale bias corrected output.
Parameters
----------
simulation : xr.Dataset
Daily bias corrected data to be downscaled.
aiqpd : xr.Dataset or sdba.adjustment.AnalogQuantilePreservingDownscaling
Trained ``xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling``, or
Dataset representation that will instantiate
``xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling``.
variable : str
Target variable in `simulation` to downscale. Downscaled output will share the
same name.
Returns
-------
out : xr.Dataset
AIQPD-downscaled values from `simulation`. May be a lazy-evaluated future, not
yet computed.
"""
variable = str(variable)
if isinstance(aiqpd, xr.Dataset):
aiqpd = sdba.adjustment.AnalogQuantilePreservingDownscaling.from_dataset(aiqpd)
out = aiqpd.adjust(simulation[variable])
return out.to_dataset(name=variable)
def apply_bias_correction(
gcm_training_ds,
obs_training_ds,
gcm_predict_ds,
train_variable,
out_variable,
method,
):
"""Bias correct input model data using specified method,
using either monthly or +/- 15 day time grouping. Currently
the QDM method is supported.
Parameters
----------
gcm_training_ds : Dataset
training model data for building quantile map
obs_training_ds : Dataset
observation data for building quantile map
gcm_predict_ds : Dataset
future model data to be bias corrected
train_variable : str
        Variable name used in training data.
    out_variable : str
        Variable name used in downscaled output.
    method : {"QDM"}
        Method to be used in the applied bias correction.
Returns
-------
ds_predicted : xr.Dataset
Dataset that has been bias corrected.
"""
if method == "QDM":
# instantiates a grouper class that groups by day of the year
# centered window: +/-15 day group
group = sdba.Grouper("time.dayofyear", window=31)
model = sdba.adjustment.QuantileDeltaMapping(group=group, kind="+")
model.train(
ref=obs_training_ds[train_variable], hist=gcm_training_ds[train_variable]
)
predicted = model.adjust(sim=gcm_predict_ds[train_variable])
else:
raise ValueError("this method is not supported")
ds_predicted = predicted.to_dataset(name=out_variable)
return ds_predicted
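# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# All three inputs are synthetic stand-ins; real use passes model and observation
# datasets that share the training variable name.
def _example_apply_bias_correction_sketch():
    time = xr.cftime_range("2000-01-01", "2009-12-31", freq="D", calendar="noleap")
    rng = np.random.default_rng(2)
    def _make(offset):
        return xr.Dataset({"tas": ("time", offset + 10 * rng.random(len(time)))}, coords={"time": time})
    return apply_bias_correction(
        gcm_training_ds=_make(17.0),
        obs_training_ds=_make(15.0),
        gcm_predict_ds=_make(18.0),
        train_variable="tas",
        out_variable="tas",
        method="QDM",
    )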
def apply_downscaling(
bc_ds,
obs_climo_coarse,
obs_climo_fine,
train_variable,
out_variable,
method,
domain_fine,
weights_path=None,
):
"""Downscale input bias corrected data using specified method.
Currently only the BCSD method for spatial disaggregation is
supported.
Parameters
----------
bc_ds : Dataset
Model data that has already been bias corrected.
obs_climo_coarse : Dataset
Observation climatologies at coarse resolution.
obs_climo_fine : Dataset
Observation climatologies at fine resolution.
train_variable : str
Variable name used in obs data.
out_variable : str
Variable name used in downscaled output.
method : {"BCSD"}
        Method to be used in the applied downscaling.
domain_fine : Dataset
Domain that specifies the fine resolution grid to downscale to.
weights_path : str or None, optional
Path to the weights file, used for downscaling to fine resolution.
Returns
-------
af_fine : xr.Dataset
A dataset of adjustment factors at fine resolution used in downscaling.
ds_downscaled : xr.Dataset
A model dataset that has been downscaled from the bias correction resolution to specified domain file resolution.
"""
if method == "BCSD":
model = SpatialDisaggregator(var=train_variable)
af_coarse = model.fit(bc_ds, obs_climo_coarse, var_name=train_variable)
# regrid adjustment factors
# BCSD uses bilinear interpolation for both temperature and precip to
# regrid adjustment factors
af_fine = xesmf_regrid(af_coarse, domain_fine, "bilinear", weights_path)
# apply adjustment factors
predicted = model.predict(
af_fine, obs_climo_fine[train_variable], var_name=train_variable
)
else:
raise ValueError("this method is not supported")
ds_downscaled = predicted.to_dataset(name=out_variable)
return af_fine, ds_downscaled
def build_xesmf_weights_file(x, domain, method, filename=None):
"""Build ESMF weights file for regridding x to a global grid
Parameters
----------
x : xr.Dataset
domain : xr.Dataset
Domain to regrid to.
method : str
Method of regridding. Passed to ``xesmf.Regridder``.
filename : optional
Local path to output netCDF weights file.
Returns
-------
outfilename : str
Path to resulting weights file.
"""
out = xe.Regridder(
x,
domain,
method=method,
filename=filename,
)
return str(out.filename)
def _add_cyclic(ds, dim):
"""
Adds wrap-around, appending first value to end of data for named dimension.
Basically an xarray version of ``cartopy.util.add_cyclic_point()``.
"""
return ds.map(
lambda x, d: xr.concat([x, x.isel({d: 0})], dim=d),
keep_attrs=True,
d=str(dim),
)
def xesmf_regrid(x, domain, method, weights_path=None, astype=None, add_cyclic=None):
"""
Regrid a Dataset.
Parameters
----------
x : xr.Dataset
domain : xr.Dataset
Domain to regrid to.
method : str
Method of regridding. Passed to ``xesmf.Regridder``.
weights_path : str, optional
Local path to netCDF file of pre-calculated XESMF regridding weights.
astype : str, numpy.dtype, or None, optional
Typecode or data-type to which the regridded output is cast.
add_cyclic : str, or None, optional
Add cyclic point (aka wrap-around pixel) to given dimension before
regridding. Useful for avoiding dateline artifacts along longitude
in global datasets.
Returns
-------
xr.Dataset
"""
if add_cyclic:
x = _add_cyclic(x, add_cyclic)
regridder = xe.Regridder(
x,
domain,
method=method,
filename=weights_path,
)
if astype:
return regridder(x).astype(astype)
return regridder(x)
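# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Regrids a synthetic 2-degree global field onto a 1-degree target domain with bilinear
# interpolation; no pre-computed weights file is assumed.
def _example_xesmf_regrid_sketch():
    coarse = xr.Dataset(
        {"tas": (("lat", "lon"), np.random.rand(90, 180))},
        coords={"lat": np.arange(-89.0, 91.0, 2.0), "lon": np.arange(0.0, 360.0, 2.0)},
    )
    fine_domain = xr.Dataset(
        coords={"lat": np.arange(-89.5, 90.5, 1.0), "lon": np.arange(0.5, 360.5, 1.0)}
    )
    return xesmf_regrid(coarse, fine_domain, method="bilinear")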
def standardize_gcm(ds, leapday_removal=True):
"""
Parameters
----------
ds : xr.Dataset
leapday_removal : bool, optional
Returns
-------
xr.Dataset
"""
# Remove cruft coordinates, variables, dims.
cruft_vars = ("height", "member_id", "time_bnds")
dims_to_squeeze = []
coords_to_drop = []
for v in cruft_vars:
if v in ds.dims:
dims_to_squeeze.append(v)
elif v in ds.coords:
coords_to_drop.append(v)
ds_cleaned = ds.squeeze(dims_to_squeeze, drop=True).reset_coords(
coords_to_drop, drop=True
)
# Cleanup time.
# if variable is precip, need to update units to mm day-1
if "pr" in ds_cleaned.variables:
# units should be kg/m2/s in CMIP6 output
if ds_cleaned["pr"].units == "kg m-2 s-1":
# convert to mm/day
mmday_conversion = 24 * 60 * 60
ds_cleaned["pr"] = ds_cleaned["pr"] * mmday_conversion
# update units attribute
ds_cleaned["pr"].attrs["units"] = "mm day-1"
else:
# we want this to fail, as pr units are something we don't expect
raise ValueError("check units: pr units attribute is not kg m-2 s-1")
if leapday_removal:
# if calendar is just integers, xclim cannot understand it
if ds.time.dtype == "int64":
ds_cleaned["time"] = xr.decode_cf(ds_cleaned).time
# remove leap days and update calendar
ds_noleap = xclim_remove_leapdays(ds_cleaned)
# rechunk, otherwise chunks are different sizes
ds_out = ds_noleap.chunk({"time": 730, "lat": len(ds.lat), "lon": len(ds.lon)})
else:
ds_out = ds_cleaned
return ds_out
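# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds a tiny synthetic precipitation cube with CMIP-style units (kg m-2 s-1) on a
# standard calendar, then standardizes it (unit conversion plus leap-day removal).
def _example_standardize_gcm_sketch():
    time = xr.cftime_range("2000-01-01", "2001-12-31", freq="D", calendar="standard")
    pr = xr.DataArray(
        np.random.rand(len(time), 2, 2) * 1e-5,
        dims=("time", "lat", "lon"),
        coords={"time": time, "lat": [0.0, 1.0], "lon": [0.0, 1.0]},
        attrs={"units": "kg m-2 s-1"},
        name="pr",
    )
    return standardize_gcm(pr.to_dataset(), leapday_removal=True)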
def xclim_remove_leapdays(ds):
"""
Parameters
----------
ds : xr.Dataset
Returns
-------
xr.Dataset
"""
ds_noleap = convert_calendar(ds, target="noleap")
return ds_noleap
def apply_wet_day_frequency_correction(ds, process):
"""
Parameters
----------
    ds : xr.Dataset
        Daily precipitation data (mm day-1).
    process : {"pre", "post"}
        "pre" replaces exact zeros with small random values below the 0.05 mm/day
        threshold before bias correction; "post" sets values below that threshold
        back to zero afterwards.
Returns
-------
xr.Dataset
Notes
-------
[1] A.J. Cannon, S.R. Sobie, & T.Q. Murdock, "Bias correction of GCM precipitation by quantile mapping: How well do methods preserve changes in quantiles and extremes?", Journal of Climate, vol. 28, Issue 7, pp. 6938-6959.
"""
threshold = 0.05 # mm/day
low = 1e-16
if process == "pre":
ds_corrected = ds.where(ds != 0.0, np.random.uniform(low=low, high=threshold))
elif process == "post":
ds_corrected = ds.where(ds >= threshold, 0.0)
else:
raise ValueError("this processing option is not implemented")
return ds_corrected
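# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Round-trips the wet-day frequency correction on a few synthetic daily precip values:
# "pre" perturbs exact zeros before bias correction, "post" re-zeroes drizzle below
# the 0.05 mm/day threshold afterwards.
def _example_wet_day_frequency_sketch():
    pr = xr.Dataset({"pr": ("time", np.array([0.0, 0.01, 0.2, 5.0]))})
    pre = apply_wet_day_frequency_correction(pr, process="pre")
    return apply_wet_day_frequency_correction(pre, process="post")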
| 31.456432 | 226 | 0.65895 |
4a232287cbf80188706afa6aedfe1fdfc6dccb61 | 896 | py | Python | altair/examples/simple_scatter_with_errorbars.py | Mechachleopteryx/altair | c1523443f2a3c15c6181a3a7a154351518784df7 | [
"BSD-3-Clause"
] | 6,831 | 2016-09-23T19:35:19.000Z | 2022-03-31T13:29:39.000Z | altair/examples/simple_scatter_with_errorbars.py | Mechachleopteryx/altair | c1523443f2a3c15c6181a3a7a154351518784df7 | [
"BSD-3-Clause"
] | 2,068 | 2016-09-23T14:53:23.000Z | 2022-03-31T01:43:15.000Z | altair/examples/simple_scatter_with_errorbars.py | Mechachleopteryx/altair | c1523443f2a3c15c6181a3a7a154351518784df7 | [
"BSD-3-Clause"
] | 711 | 2016-09-26T16:59:18.000Z | 2022-03-24T11:32:40.000Z | """
Simple Scatter Plot with Errorbars
----------------------------------
A simple scatter plot of a data set with errorbars.
"""
# category: scatter plots
import altair as alt
import pandas as pd
import numpy as np
# generate some data points with uncertainties
np.random.seed(0)
x = [1, 2, 3, 4, 5]
y = np.random.normal(10, 0.5, size=len(x))
yerr = 0.2
# set up data frame
source = pd.DataFrame({"x": x, "y": y, "yerr": yerr})
# the base chart
base = alt.Chart(source).transform_calculate(
ymin="datum.y-datum.yerr",
ymax="datum.y+datum.yerr"
)
# generate the points
points = base.mark_point(
filled=True,
size=50,
color='black'
).encode(
x=alt.X('x', scale=alt.Scale(domain=(0, 6))),
y=alt.Y('y', scale=alt.Scale(zero=False))
)
# generate the error bars
errorbars = base.mark_errorbar().encode(
x="x",
y="ymin:Q",
y2="ymax:Q"
)
points + errorbars
| 20.363636 | 53 | 0.631696 |
4a23231154521c0c9179cba7c140cd7bedc3ddda | 2,431 | py | Python | src/python/DTNRMAgent/RecurringActions/Plugins/CPUInfo.py | juztas/backup-siterm | 28f4027e8c9995df5a969dee3d65263b9a09d075 | [
"Apache-2.0"
] | null | null | null | src/python/DTNRMAgent/RecurringActions/Plugins/CPUInfo.py | juztas/backup-siterm | 28f4027e8c9995df5a969dee3d65263b9a09d075 | [
"Apache-2.0"
] | null | null | null | src/python/DTNRMAgent/RecurringActions/Plugins/CPUInfo.py | juztas/backup-siterm | 28f4027e8c9995df5a969dee3d65263b9a09d075 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Plugin which produces all info from lscpu
It produces:
{'CPU(s)': 2, 'L1d cache': '32K', 'CPU op-mode(s)': '32-bit, 64-bit', 'NUMA node0 CPU(s)': '0,1',
'Hypervisor vendor': 'VMware', 'L2 cache': '256K', 'L1i cache': '32K', 'CPU MHz': 3392.164,
'Core(s) per socket': 1, 'Virtualization type': 'full', 'Thread(s) per core': 1, 'On-line CPU(s) list': '0,1',
'Socket(s)': 2, 'Architecture': 'x86_64', 'Model': 60, 'Vendor ID': 'GenuineIntel', 'CPU family': 6,
'L3 cache': '8192K', 'BogoMIPS': 6784.32, 'Stepping': 3, 'Byte Order': 'Little Endian', 'NUMA node(s)': 1}
Copyright 2017 California Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Title : dtnrm
Author : Justas Balcas
Email : justas.balcas (at) cern.ch
@Copyright : Copyright (C) 2016 California Institute of Technology
Date : 2017/09/26
"""
from __future__ import print_function
import pprint
from DTNRMAgent.RecurringActions.Utilities import externalCommand, tryConvertToNumeric
from DTNRMLibs.MainUtilities import getConfig
NAME = 'CPUInfo'
def get(config):
"""Get lscpu information"""
cpuInfo = {}
tmpOut = externalCommand('lscpu')
for item in tmpOut:
for desc in item.split('\n'):
vals = desc.split(':')
if len(vals) == 2:
cpuInfo[vals[0].strip()] = tryConvertToNumeric(vals[1].strip())
else:
print('CpuInfo: Skipped this item: ', vals)
cpuInfo['num_cores'] = 1
    if 'Socket(s)' in cpuInfo and 'Core(s) per socket' in cpuInfo:
try:
cpuInfo['num_cores'] = int(cpuInfo['Socket(s)']) * int(cpuInfo['Core(s) per socket'])
except Exception:
print('Failed to calculate num_cores from %s. will set to 1' % cpuInfo)
return cpuInfo
if __name__ == "__main__":
PRETTY = pprint.PrettyPrinter(indent=4)
PRETTY.pprint(get(getConfig(['/etc/dtnrm/main.conf', 'main.conf'])))
| 42.649123 | 111 | 0.663924 |
4a23241529ad082c45904a034a3d43c59f5113d9 | 3,912 | py | Python | handwritten_digits/element_wise_operations.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | handwritten_digits/element_wise_operations.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | handwritten_digits/element_wise_operations.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | import numpy as np
def naive_relu(x):
# X must be 2D Numpy tensor
assert len(x.shape) == 2
# Avoid overwriting the input tensor
x = x.copy()
for row in range(x.shape[0]):
for col in range(x.shape[1]):
x[row, col] = max(x[row, col], 0)
return x
def naive_add(x, y):
# X must be 2D Numpy tensor
assert len(x.shape) == 2
assert x.shape == y.shape
# Avoid overwriting the input tensor
x = x.copy()
for row in range(x.shape[0]):
for col in range(x.shape[1]):
x[row, col] += y[row, col]
return x
def naive_add_matrix_and_vector(x, y):
# X must be 2D Numpy tensor
assert len(x.shape) == 2
# Y must be Numpy vector
assert len(y.shape) == 1
assert x.shape[1] == y.shape[0]
# Avoid overwriting the input tensor
x = x.copy()
for row in range(x.shape[0]):
for col in range(x.shape[1]):
            x[row, col] += y[col]
return x
def naive_vector_dot(x, y):
assert len(x.shape) == 1
assert len(y.shape) == 1
assert x.shape[0] == y.shape[0]
z = 0.
for row in range(x.shape[0]):
z += x[row] * y[row]
return z
def naive_matrix_vector_dot(x, y):
assert len(x.shape) == 2
assert len(y.shape) == 1
assert x.shape[1] == y.shape[0]
z = np.zeros(x.shape[0])
for row in range(x.shape[0]):
for col in range(x.shape[1]):
z[row] += x[row, col] * y[col]
return z
def naive_matrix_dot(x, y):
assert len(x.shape) == 2
assert len(y.shape) == 2
assert x.shape[1] == y.shape[0]
z = np.zeros((x.shape[0], y.shape[1]))
for row in range(x.shape[0]):
for col in range(y.shape[1]):
row_x = x[row, :]
column_y = y[:, col]
z[row, col] = naive_vector_dot(row_x, column_y)
return z
# 2D Numpy tensor
print('*** Naive ReLU ***')
input_x_2d_tensor = np.array(
[
[5, 78, 2, 34, 0],
[6, 79, 3, 35, 1],
[7, 80, 4, 36, 2]
]
)
print('Input 2D tensor shape: ', input_x_2d_tensor.shape)
print('Input 2D tensor ndim: ', input_x_2d_tensor.ndim)
output_2d_tensor = naive_relu(input_x_2d_tensor)
print('ReLU output: ', output_2d_tensor)
print('*** ---------- ***')
print()
print('*** Naive ADD operation ***')
input_y_2d_tensor = np.array(
[
[1, 7, 22, 31, 0],
[3, 9, 33, 35, 1],
[4, 8, 44, 36, 2]
]
)
output_2d_tensor = naive_add(input_x_2d_tensor, input_y_2d_tensor)
print('Naive ADD output: ', output_2d_tensor)
print('*** ---------- ***')
print()
print('*** Naive ADD operation - Matrix and Vector ***')
input_x_2d_tensor = np.array(
[
[1, 2, 3, 4],
[5, 6, 7, 8]
]
)
input_y_2d_tensor = np.array([9, 10, 11, 12])
output_2d_tensor = naive_add_matrix_and_vector(input_x_2d_tensor, input_y_2d_tensor)
print('Naive ADD operation - Matrix and Vector output: ', output_2d_tensor)
print('*** ---------- ***')
print()
print('*** Naive DOT operation - Vector ***')
x_vector = np.array([1, 2, 3])
y_vector = np.array([4, 5, 6])
dot_product = naive_vector_dot(x_vector, y_vector)
print('Naive DOT output: ', dot_product)
print('*** ---------- ***')
print()
print('*** Naive DOT operation - Matrix and Vector ***')
x_matrix = np.array(
[
[1, 2, 3, 4],
[5, 6, 7, 8]
]
)
y_vector = np.array([1, 2, 1, 2])
dot_product = naive_matrix_vector_dot(x_matrix, y_vector)
print('Naive DOT operation - Matrix and Vector output: ', dot_product)
print('*** ---------- ***')
print()
print('*** Naive DOT operation - Matrix ***')
x_matrix = np.array(
[
[1, 2, 3, 4],
[5, 6, 7, 8]
]
)
y_matrix = np.array(
[
[1, 1, 1, 1],
[0, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 0, 0]
]
)
dot_product = naive_matrix_dot(x_matrix, y_matrix)
print('Naive DOT operation - Matrix: ', dot_product)
print('*** ---------- ***')
print()
| 23.42515 | 84 | 0.55956 |
4a2324500db1154b46bf7bcc1946bf61ed677cd8 | 2,915 | py | Python | server/tests/dbtest.py | Kraken-CI/kraken | cc64cc1791e8d35bf978edac4cc65db738cf5133 | [
"Apache-2.0"
] | 66 | 2020-08-14T12:52:39.000Z | 2022-03-31T13:56:25.000Z | server/tests/dbtest.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | 110 | 2020-07-23T07:12:09.000Z | 2022-03-26T05:54:18.000Z | server/tests/dbtest.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | 4 | 2021-03-10T05:25:03.000Z | 2022-01-24T10:12:33.000Z | # Copyright 2020-2021 The Kraken Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sqlalchemy
from kraken.server.models import db
def create_empty_db(db_name, drop_exisiting=False):
db_root_url = os.environ.get('POSTGRES_URL', 'postgresql://kk:kk@localhost:15432/')
# check if db exists
engine = sqlalchemy.create_engine(db_root_url + db_name, echo=False)
db_exists = False
try:
connection = engine.connect()
connection.execute('select 1')
connection.close()
db_exists = True
except Exception:
pass
engine = sqlalchemy.create_engine(db_root_url, echo=False)
connection = engine.connect()
if db_exists and drop_exisiting:
connection.execute("commit;")
connection.execute("DROP DATABASE %s;" % db_name)
db_exists = False
# create db if missing
if not db_exists:
connection.execute("commit;")
connection.execute("CREATE DATABASE %s;" % db_name)
connection.close()
return db_root_url, db_exists
def clear_db_postresql(connection):
for table in db.metadata.tables.keys():
connection.execute('ALTER TABLE "%s" DISABLE TRIGGER ALL;' % table)
try:
connection.execute('DELETE FROM "%s";' % table)
except Exception as e:
if not "doesn't exist" in str(e):
raise
connection.execute('ALTER TABLE "%s" ENABLE TRIGGER ALL;' % table)
def prepare_db(db_name=None):
# session.close_all()
# if metadata.bind:
# metadata.bind.dispose()
if db_name is None:
db_name = os.environ.get('KK_UT_DB', 'kkut')
# db_root_url, db_exists = create_empty_db(db_name)
db_root_url, _ = create_empty_db(db_name)
# prepare connection, create any missing tables
#clean_db()
real_db_url = db_root_url + db_name
# engine = sqlalchemy.create_engine(real_db_url, echo=False)
# db.metadata.bind = engine
# db.setup_all()
# db.create_all()
# db.fix_compatibility()
# if db_exists:
# global_log.log_global('prepare_db - delete all rows', 'real_db_url', real_db_url)
# # delete all rows from all tables
# if db_url.startswith("mysql"):
# clear_db_mysql(engine)
# elif db_url.startswith("postgresql"):
# clear_db_postresql(engine)
# db.prepare_indexes(engine)
return real_db_url
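# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Assumes a reachable PostgreSQL server (POSTGRES_URL environment variable, or the
# default localhost URL above); the database name 'kkut_example' is hypothetical.
def _example_prepare_db_sketch():
    url = prepare_db('kkut_example')
    engine = sqlalchemy.create_engine(url, echo=False)
    return engine, url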
| 31.010638 | 91 | 0.67307 |
4a2324e57e1f6b4908891e5e1b7193e2a683b27f | 2,860 | py | Python | logger/json_logger.py | gvalvano/idas | e1b112c8d0cd17b2b8486435dfe9de477bca2221 | [
"Apache-2.0"
] | 29 | 2020-07-04T00:04:28.000Z | 2022-03-18T01:49:34.000Z | idas/logger/json_logger.py | gvalvano/unet_crf_as_rnn | 31b79741b77614764dcf3d2690fe0b0fab44934d | [
"Apache-2.0"
] | 2 | 2020-10-31T14:41:02.000Z | 2021-11-21T18:16:19.000Z | logger/json_logger.py | gvalvano/idas | e1b112c8d0cd17b2b8486435dfe9de477bca2221 | [
"Apache-2.0"
] | 7 | 2020-10-21T01:02:52.000Z | 2021-11-14T16:52:18.000Z | # Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import io
data = {}
def add_new_node(key, values, file_name='log_file.json'):
"""
Add new node to JSON file under the key=key and with sub-keys=values.
Args:
key (string): dictionary key to address the node
values (dict): dictionary with key-values couples
file_name (string): JSON file name to write
Example:
data.update({'SPARSE_TRAINING': {'done_before': False, 'beta': 0.10, 'sparsity': 0.30}})
"""
data.update({key: values})
with io.open(file_name, 'w', encoding='utf8') as outfile:
str_ = json.dumps(data,
indent=4, sort_keys=True,
separators=(',', ': '), ensure_ascii=False)
outfile.write(str_)
def read_one_node(key, file_name='log_file.json'):
"""
Return the dictionary in JSON file under the key=key.
Args:
key (string): dictionary key to address the node
file_name (string): JSON file name to read
Returns:
Dictionary
"""
with open(file_name, 'r', encoding='utf8') as infile:
node = json.load(infile)
return node[key]
def update_node(key, sub_key, sub_value, file_name='log_file.json'):
"""
Update a node in a JSON file under the key=key and with sub-keys=values.
Args:
key (string): dictionary key to address the node
sub_key (string): field name to be updated under the node key
sub_value (): value to assign to the field name under the node key
file_name (string): JSON file name to write
"""
content_dict = read_one_node(key, file_name=file_name)
content_dict[sub_key] = sub_value
data.update({key: content_dict})
with io.open(file_name, 'w', encoding='utf8') as outfile:
str_ = json.dumps(data,
indent=4, sort_keys=True,
separators=(',', ': '), ensure_ascii=False)
outfile.write(str_)
if __name__ == '__main__':
# Example of reading and writing on JSON file
k = 'KEY_1'
val = {'flag': True, 'alpha': 0.10, 'beta': 0.30}
add_new_node(k, val)
k = 'KEY_2'
val = {'flag': False, 'alpha': 0.20, 'beta': 0.60}
add_new_node(k, val)
k = 'KEY_1'
print(read_one_node(k))
| 30.105263 | 96 | 0.633566 |
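# A short sketch of the update path that the __main__ block above does not
# exercise; the node key, field names and import path are illustrative.
from idas.logger.json_logger import add_new_node, read_one_node, update_node

add_new_node('SPARSE_TRAINING',
             {'done_before': False, 'beta': 0.10, 'sparsity': 0.30},
             file_name='log_file.json')

# Flip a single field of the existing node and read the node back.
update_node('SPARSE_TRAINING', 'done_before', True, file_name='log_file.json')
print(read_one_node('SPARSE_TRAINING', file_name='log_file.json'))
# {'beta': 0.1, 'done_before': True, 'sparsity': 0.3}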
4a232581b8a60885474b07c2a1212893df675ead | 6,633 | py | Python | doc/conf.py | neuromusic/umap | 86a2ce834f37db9f5b57f58440ac66e41d035ea0 | [
"BSD-2-Clause"
] | null | null | null | doc/conf.py | neuromusic/umap | 86a2ce834f37db9f5b57f58440ac66e41d035ea0 | [
"BSD-2-Clause"
] | null | null | null | doc/conf.py | neuromusic/umap | 86a2ce834f37db9f5b57f58440ac66e41d035ea0 | [
"BSD-2-Clause"
] | 1 | 2018-08-16T11:11:08.000Z | 2018-08-16T11:11:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# umap documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 8 10:09:40 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
# 'bokeh.sphinxext.bokeh_plot',
'sphinx_gallery.gen_gallery', ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'umap'
copyright = '2018, Leland McInnes'
author = 'Leland McInnes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'umapdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'umap.tex', 'umap Documentation',
'Leland McInnes', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'umap', 'umap Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'umap', 'umap Documentation',
author, 'umap', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
'bokeh': ('http://bokeh.pydata.org/en/latest/', None),
}
# -- Options for sphinx-gallery ---------------------------------------------
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs': '../examples',
# path where to save gallery generated examples
'gallery_dirs': 'auto_examples',
'plot_gallery': False, # Turn off running the examples for now
'reference_url': {
'umap': None,
'python': 'https://docs.python.org/{.major}'.format(sys.version_info),
'numpy': 'https://docs.scipy.org/doc/numpy/',
'scipy': 'https://docs.scipy.org/doc/scipy/reference',
'matplotlib': 'https://matplotlib.org/',
'pandas': 'https://pandas.pydata.org/pandas-docs/stable/',
'sklearn': 'http://scikit-learn.org/stable/',
'bokeh': 'http://bokeh.pydata.org/en/latest/',
}
}
| 32.356098 | 82 | 0.664405 |
4a2326071aaa5e78d4d98e4f39c95090e947380b | 3,956 | py | Python | gemini_instruments/hokupaa_quirc/adclass.py | astrochun/DRAGONS | 041cdeeaf3eca9751085f6e01222d48c8e63cd3c | [
"BSD-3-Clause"
] | 1 | 2020-06-03T15:13:18.000Z | 2020-06-03T15:13:18.000Z | gemini_instruments/hokupaa_quirc/adclass.py | b1quint/DRAGONS | 0f5814caaa791bda47edf00407722247095f8427 | [
"BSD-3-Clause"
] | null | null | null | gemini_instruments/hokupaa_quirc/adclass.py | b1quint/DRAGONS | 0f5814caaa791bda47edf00407722247095f8427 | [
"BSD-3-Clause"
] | null | null | null | #
# Gemini Observatory
#
# Dragons
# gemini_instruments
# hokupaa_QUIRC.adclass.py
# ------------------------------------------------------------------------------
from astrodata import astro_data_tag
from astrodata import astro_data_descriptor
from astrodata import returns_list
from astrodata import TagSet
from ..gemini import AstroDataGemini
# ------------------------------------------------------------------------------
class AstroDataHokupaaQUIRC(AstroDataGemini):
__keyword_dict = dict(
airmass = 'AMEND',
wavelength_band = 'FILTER',
observation_type = 'IMAGETYP',
)
@staticmethod
def _matches_data(source):
return source[0].header.get('INSTRUME', '') == 'Hokupaa+QUIRC'
@astro_data_tag
def _tag_instrument(self):
return TagSet(['HOKUPAAQUIRC'])
@astro_data_tag
def _tag_image(self):
return TagSet(['IMAGE'])
@astro_data_tag
def _tag_dark(self):
if 'dark' in self.phu.get('OBJECT', '').lower():
return TagSet(['DARK'], blocks=['IMAGE', 'SPECT'])
@astro_data_tag
def _tag_flat(self):
if 'flat' in self.phu.get('OBJECT', '').lower():
return TagSet(['FLAT', 'CAL'])
@astro_data_descriptor
def airmass(self):
return self.phu.get(self._keyword_for('airmass'))
@astro_data_descriptor
def detector_name(self):
"""
Returns the name of the detector. For HOKUPAA+QUIRC, this is always
'QUIRC'
Returns
-------
<str>:
Detector name
"""
return 'QUIRC'
@astro_data_descriptor
def filter_name(self, pretty=False):
"""
This descriptor is used to display 'WaveBand' in the archive.
Parameters
----------
pretty: <bool>
This keyword parameter is present for API purposes.
It has no effect for this descriptor.
Returns
-------
<str>:
wavelength band substituting for filter_name(pretty=True)
"""
return self.wavelength_band()
@astro_data_descriptor
def instrument(self, generic=False):
"""
Returns the name of the instrument making the observation
Parameters
----------
generic: <bool>
Request the generic instrument name, if applicable.
Returns
-------
<str>:
instrument name
"""
return self.phu.get(self._keyword_for('instrument'))
@astro_data_descriptor
def observation_type(self):
"""
        Returns the 'type' of the observation.
Returns
-------
<str>:
observation type.
"""
return self.phu.get(self._keyword_for('observation_type'))
@astro_data_descriptor
def ra(self):
"""
        Returns the Right Ascension of the observation.
Returns
-------
<str>:
right ascension
"""
return self.phu.get(self._keyword_for('ra'))
@astro_data_descriptor
def dec(self):
"""
        Returns the Declination of the observation.
Returns
-------
<str>:
declination
"""
return self.phu.get(self._keyword_for('dec'))
@astro_data_descriptor
def wavelength_band(self):
"""
Returns the name of the bandpass of the observation.
Returns
-------
<str>:
Name of the bandpass.
"""
return self.phu.get(self._keyword_for('wavelength_band'))
@astro_data_descriptor
def target_ra(self):
return self.wcs_ra()
@astro_data_descriptor
def target_dec(self):
return self.wcs_dec()
| 24.725 | 80 | 0.512892 |
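# A hedged sketch of how this class is normally reached: files are opened
# through the astrodata factory, which selects AstroDataHokupaaQUIRC whenever
# _matches_data() sees INSTRUME == 'Hokupaa+QUIRC'. The FITS filename is a
# placeholder.
import astrodata
import gemini_instruments  # noqa: F401  (registers the instrument classes)

ad = astrodata.open('hokupaa_quirc_frame.fits')  # placeholder file

print(ad.tags)               # e.g. {'HOKUPAAQUIRC', 'IMAGE', ...}
print(ad.instrument())       # value of the INSTRUME keyword
print(ad.wavelength_band())  # mapped from the FILTER keyword (see __keyword_dict)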
4a23263a9447c141032064b3fe856f1ea98126fa | 415 | py | Python | noise_helpers/is_class.py | marhoy/Koopen | 50b45a1bc4e6b5a157758e3091925405a28920ab | [
"CC-BY-4.0"
] | 1 | 2021-03-19T14:40:35.000Z | 2021-03-19T14:40:35.000Z | noise_helpers/is_class.py | marhoy/Koopen | 50b45a1bc4e6b5a157758e3091925405a28920ab | [
"CC-BY-4.0"
] | null | null | null | noise_helpers/is_class.py | marhoy/Koopen | 50b45a1bc4e6b5a157758e3091925405a28920ab | [
"CC-BY-4.0"
] | null | null | null | import numpy as np
def is_class(df):
    """Return 1 if the given row falls inside scheduled class hours, else 0.

    Expects a single DataFrame row (e.g. via ``df.apply(is_class, axis=1)``)
    with integer ``weekday`` (Monday = 0) and ``hour`` columns.
    """
    if (df["weekday"] == 0) & (11 <= df["hour"] < 16):
return 1
if (df["weekday"] == 1) & (12 <= df["hour"] < 16):
return 1
if (df["weekday"] == 2) & (8 <= df["hour"] < 16):
return 1
if (df["weekday"] == 3) & (8 <= df["hour"] < 12):
return 1
if (df["weekday"] == 4) & (8 <= df["hour"] < 14):
return 1
return 0 | 31.923077 | 55 | 0.426506 |
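# A short sketch of the intended row-wise use: is_class() receives one row at
# a time (so the chained comparisons act on scalars) via DataFrame.apply.
# The column values and import path below are illustrative; weekday 0 is Monday.
import pandas as pd

from is_class import is_class  # adjust to where this module lives

df = pd.DataFrame({
    "weekday": [0, 2, 5],   # Monday, Wednesday, Saturday
    "hour":    [12, 9, 12],
})
df["in_class"] = df.apply(is_class, axis=1)
print(df)  # Monday 12:00 and Wednesday 09:00 get 1; the Saturday row gets 0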
4a23272d612a0b76ad94ba75b503e08efbf66d27 | 2,150 | py | Python | superset/superset/db_engine_specs/clickhouse.py | smola/superset-compose | 749803b1ad0df0f424a7f99ae0dbc6e429934c45 | [
"Apache-2.0"
] | 6 | 2019-06-14T11:16:54.000Z | 2020-11-08T16:02:00.000Z | superset/superset/db_engine_specs/clickhouse.py | smola/superset-compose | 749803b1ad0df0f424a7f99ae0dbc6e429934c45 | [
"Apache-2.0"
] | 203 | 2019-05-31T11:13:10.000Z | 2020-03-31T02:50:54.000Z | superset/superset/db_engine_specs/clickhouse.py | smola/superset-compose | 749803b1ad0df0f424a7f99ae0dbc6e429934c45 | [
"Apache-2.0"
] | 14 | 2019-05-31T11:32:40.000Z | 2021-01-28T11:18:16.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
from superset.db_engine_specs.base import BaseEngineSpec
class ClickHouseEngineSpec(BaseEngineSpec):
"""Dialect for ClickHouse analytical DB."""
engine = "clickhouse"
time_secondary_columns = True
time_groupby_inline = True
_time_grain_functions = {
None: "{col}",
"PT1M": "toStartOfMinute(toDateTime({col}))",
"PT5M": "toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)",
"PT10M": "toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)",
"PT15M": "toDateTime(intDiv(toUInt32(toDateTime({col})), 900)*900)",
"PT0.5H": "toDateTime(intDiv(toUInt32(toDateTime({col})), 1800)*1800)",
"PT1H": "toStartOfHour(toDateTime({col}))",
"P1D": "toStartOfDay(toDateTime({col}))",
"P1W": "toMonday(toDateTime({col}))",
"P1M": "toStartOfMonth(toDateTime({col}))",
"P0.25Y": "toStartOfQuarter(toDateTime({col}))",
"P1Y": "toStartOfYear(toDateTime({col}))",
}
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == "DATE":
return "toDate('{}')".format(dttm.strftime("%Y-%m-%d"))
if tt == "DATETIME":
return "toDateTime('{}')".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
| 41.346154 | 80 | 0.66186 |
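# A quick sketch of what the spec above produces; the column name and datetime
# values are illustrative.
from datetime import datetime

from superset.db_engine_specs.clickhouse import ClickHouseEngineSpec

# Time-grain template for "one day", applied to a column named ts:
print(ClickHouseEngineSpec._time_grain_functions["P1D"].format(col="ts"))
# -> toStartOfDay(toDateTime(ts))

# Literal conversion used when comparing against DATE / DATETIME columns:
print(ClickHouseEngineSpec.convert_dttm("DATE", datetime(2019, 1, 2)))
# -> toDate('2019-01-02')
print(ClickHouseEngineSpec.convert_dttm("DATETIME", datetime(2019, 1, 2, 3, 4, 5)))
# -> toDateTime('2019-01-02 03:04:05')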
4a2327d6a465653f310b61b803d6dc17c20d6746 | 483 | py | Python | tests/test_api.py | rafpyprog/pySGS | 095c9c1326ff9d7b5ad38297841d2bbe22654a74 | [
"MIT"
] | 66 | 2018-02-02T16:23:11.000Z | 2022-01-29T21:34:40.000Z | tests/test_api.py | rafpyprog/pySGS | 095c9c1326ff9d7b5ad38297841d2bbe22654a74 | [
"MIT"
] | 22 | 2018-05-28T00:01:30.000Z | 2021-03-20T16:25:08.000Z | tests/test_api.py | rafpyprog/pySGS | 095c9c1326ff9d7b5ad38297841d2bbe22654a74 | [
"MIT"
] | 16 | 2019-01-13T16:07:45.000Z | 2021-11-19T13:18:01.000Z | import pytest
from sgs import api
import pandas as pd
@pytest.mark.api
def test_get_data():
NUMBER_OF_LINES = 20
data = api.get_data(4, "02/01/2018", "31/01/2018")
assert isinstance(data, list)
assert len(data) == NUMBER_OF_LINES
@pytest.mark.api
def test_get_data_with_strict_range():
NUMBER_OF_LINES = 0
data = api.get_data_with_strict_range(20577, "17/08/2019", "18/08/2019")
assert isinstance(data, list)
assert len(data) == NUMBER_OF_LINES
| 21.954545 | 76 | 0.710145 |
4a2327eea34ccd3ddb84bfc4a0bc02d1ecc0eb50 | 1,823 | py | Python | opencv_learn/biaoge.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | opencv_learn/biaoge.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | opencv_learn/biaoge.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | import os
import shutil
import cv2
import numpy as np
from random import random, randint
from math import sin, cos, atan2, pi, sqrt
class Perspective:
'''
图形斜切变换
args:
img - 图片(类型: cv::mat)
bbox - 边框
[{'filename': '0.jpg', 'name': 'red_stop', 'bndbox': [299, 300, 373, 442]}, {...}, ...]
类别, [左上角x坐标, 左上角y坐标, 右下角x坐标, 右下角y坐标]
'''
def __init__(self):
self.count = 1 # 操作次数
self.warp_rate = 0.2 # 四个角斜切大小占图像的比例
def __call__(self, img, bboxes):
for bbox in bboxes:
for _ in range(self.count):
new_img, new_bbox = self._perspective(img, bbox)
img = new_img # 修改图片
bbox['bndbox'] = new_bbox # 修改bbox
return img, bboxes
def _perspective(self, img, bbox):
new_bbox = bbox['bndbox']
x1, y1, x2, y2 = bbox['bndbox']
roi_img = img[y1:y2, x1:x2, :].copy() # 要处理的区域
img[y1:y2, x1:x2, :] = 0 # 原区域置黑
h, w, _ = roi_img.shape
hr, wr = h*self.warp_rate, w*self.warp_rate
# 左上,右上,左下,右下
pts = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
pts_dst = np.float32([
[wr*random(), hr*random()],
[w-wr*random(), hr*random()],
[wr*random(), h-hr*random()],
[w-wr*random(), h-hr*random()]
])
M = cv2.getPerspectiveTransform(pts, pts_dst)
        # dsize for warpPerspective is (width, height)
        p_img = cv2.warpPerspective(roi_img, M, (w, h))
# 调整bbox
new_h, new_w, _ = p_img.shape
x2, y2 = x1+new_w, y1+new_h # 右下角坐标
img[y1:y2, x1:x2, :] = p_img # 将修改的区域放置图中
new_bbox = [x1, y1, x2, y2] # 修改bbox
return img, new_bbox
| 30.383333 | 103 | 0.47943 |
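# A hedged usage sketch for the Perspective augmenter above; the image path,
# bounding-box values and import path are placeholders.
import cv2

from biaoge import Perspective  # adjust to where this module lives

aug = Perspective()
img = cv2.imread("sample.jpg")  # placeholder path
bboxes = [{"filename": "sample.jpg",
           "name": "red_stop",
           "bndbox": [299, 300, 373, 442]}]

warped_img, warped_bboxes = aug(img, bboxes)
print(warped_bboxes[0]["bndbox"])  # box adjusted to the warped patch
cv2.imwrite("sample_warped.jpg", warped_img)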
4a23280129b71abe78de78094091a82ae957357c | 261 | py | Python | oddeven.py | mahendra1904/pythod-programs | d4d75dac65e9795ea5728f75d90aa0b39296b25e | [
"bzip2-1.0.6"
] | null | null | null | oddeven.py | mahendra1904/pythod-programs | d4d75dac65e9795ea5728f75d90aa0b39296b25e | [
"bzip2-1.0.6"
] | null | null | null | oddeven.py | mahendra1904/pythod-programs | d4d75dac65e9795ea5728f75d90aa0b39296b25e | [
"bzip2-1.0.6"
] | null | null | null | n = int(input("Enter a number"))  # e.g. 123
rev = 0
temp = n
while n > 0:              # n = 123, 12, 1
    rem = n % 10          # 3, 2, 1
    rev = rev * 10 + rem  # 3, 32, 321
    n = n // 10           # 12, 1, 0
if temp == rev:
    print("The given number is a palindrome number")
else:
    print("The number is not a palindrome number")
| 20.076923 | 50 | 0.636015 |
4a23290ad4b252618fdd648d22283a9e4561fe39 | 1,880 | py | Python | imnn/imnn/_imnn_test.py | InformationMaximisingNeuralNetworks/imnn | 2eb04d9dc1acc4e8d1e60ef0bb25dfac17bd9f81 | [
"MIT"
] | 18 | 2019-03-15T09:08:11.000Z | 2021-08-08T17:24:04.000Z | imnn/imnn/_imnn_test.py | InformationMaximisingNeuralNetworks/imnn | 2eb04d9dc1acc4e8d1e60ef0bb25dfac17bd9f81 | [
"MIT"
] | 4 | 2019-03-21T14:56:23.000Z | 2020-07-17T15:27:41.000Z | imnn/imnn/_imnn_test.py | InformationMaximisingNeuralNetworks/imnn | 2eb04d9dc1acc4e8d1e60ef0bb25dfac17bd9f81 | [
"MIT"
] | 4 | 2019-11-21T20:54:27.000Z | 2021-11-14T16:46:12.000Z | import pytest
import jax.numpy as np
from test.defaults import defaultTests
from imnn.imnn._imnn import _IMNN
test = defaultTests(imnn=_IMNN, filename="_imnn")
# Test that all initialisation parameters correctly raise errors
@pytest.mark.parametrize("kwargs", [test.kwargs, test.reduced_kwargs])
@pytest.mark.parametrize("state", [True, False])
@pytest.mark.parametrize(
"input_variable",
[None, list(), 1., 1, np.zeros((1,)), test.rng, tuple(), (0, 0),
(test.model[0], 0), test.bad_model, test.state])
@pytest.mark.parametrize("variable", test.kwargs.keys())
def test_initialisation_parameters_(variable, kwargs, input_variable, state):
test.initialise_parameters(
variable, kwargs, input_variable, state=state, validate=False)
# Test that all parameters passed to fit correctly raise errors
@pytest.mark.parametrize("kwargs", [test.kwargs, test.reduced_kwargs])
@pytest.mark.parametrize("state", [True, False])
@pytest.mark.parametrize(
"input_variable", [None, list(), 1., 1, np.zeros((1,)), test.rng])
@pytest.mark.parametrize("variable", test.fit_kwargs.keys())
def test_fit_parameters_(variable, kwargs, input_variable, state):
test.fit_parameters(
variable, kwargs, test.fit_kwargs, input_variable, state=state,
validate=False)
# Test that fitting correctly fails and get_estimate won't run and that plot
# can be made
@pytest.mark.parametrize("state", [True, False])
@pytest.mark.parametrize("fit", [True, False])
@pytest.mark.parametrize("none_first", [True, False])
@pytest.mark.parametrize("kwargs", [test.kwargs, test.reduced_kwargs])
def test_combined_running_test_(kwargs, state, fit, none_first):
test.combined_running_test(
[test.single_target_data, test.batch_target_data], kwargs,
test.fit_kwargs, state=state, validate=False, fit=fit,
none_first=none_first, implemented=False)
| 41.777778 | 77 | 0.739362 |
4a232b82b5e34c0b0cd80977266ba98940d3ba78 | 812 | py | Python | notebooks/solutions/golomb.py | xoolive/edu_constraints | 9fcb226c2bd37f7a9c74c7b83b59de607ec07e4b | [
"MIT"
] | 1 | 2020-10-13T07:15:07.000Z | 2020-10-13T07:15:07.000Z | notebooks/solutions/golomb.py | xoolive/edu_constraints | 9fcb226c2bd37f7a9c74c7b83b59de607ec07e4b | [
"MIT"
] | null | null | null | notebooks/solutions/golomb.py | xoolive/edu_constraints | 9fcb226c2bd37f7a9c74c7b83b59de607ec07e4b | [
"MIT"
] | 7 | 2020-10-13T09:40:02.000Z | 2020-11-03T07:21:24.000Z | import facile
def golomb(n: int) -> facile.Solution:
ticks = [facile.variable(range(2 ** n)) for i in range(n)]
# First tick at the start of the ruler
facile.constraint(ticks[0] == 0)
# Ticks are ordered
for i in range(n - 1):
facile.constraint(ticks[i] < ticks[i + 1])
# All distances
distances = []
for i in range(n - 1):
for j in range(i + 1, n):
distances.append(facile.variable(ticks[j] - ticks[i]))
facile.constraint(facile.alldifferent(distances))
for d in distances:
facile.constraint(d > 0)
# Breaking the symmetry
size = len(distances)
facile.constraint(distances[size - 1] > distances[0])
return facile.minimize(
ticks, ticks[n - 1], backtrack=True, on_solution=print
)
print(golomb(9))
| 23.882353 | 66 | 0.614532 |
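# For reference, a tiny pure-Python check of the property the model above
# enforces (all pairwise tick differences distinct), using the standard
# optimal 5-mark ruler 0-1-4-9-11; no facile solver involved.
from itertools import combinations

ticks = [0, 1, 4, 9, 11]
diffs = [b - a for a, b in combinations(ticks, 2)]
assert len(diffs) == len(set(diffs)), "a Golomb ruler has no repeated distance"
print(sorted(diffs))  # [1, 2, 3, 4, 5, 7, 8, 9, 10, 11]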
4a232bc70d6f58ad267d1f5e284b49fa227ef29c | 756 | py | Python | bothanasius/cogs/meta.py | bryanforbes/Bothanasius | 2fb264f52de46c4ea3dccd57d23c76c9dd313e3e | [
"BSD-3-Clause"
] | null | null | null | bothanasius/cogs/meta.py | bryanforbes/Bothanasius | 2fb264f52de46c4ea3dccd57d23c76c9dd313e3e | [
"BSD-3-Clause"
] | null | null | null | bothanasius/cogs/meta.py | bryanforbes/Bothanasius | 2fb264f52de46c4ea3dccd57d23c76c9dd313e3e | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import logging
from discord.ext import commands
from ..context import Context
from ..bothanasius import Bothanasius
log = logging.getLogger(__name__)
class Meta(commands.Cog[Context]):
def __init__(self, bot: Bothanasius) -> None:
self.bot = bot
@commands.is_owner()
@commands.command(name='reload', hidden=True)
async def _reload(self, ctx: Context, module: str) -> None:
try:
self.bot.reload_extension(f'bothanasius.cogs.{module}')
except commands.ExtensionError as e:
await ctx.send(f'{e.__class__.__name__}: {e}')
log.exception('Failed to load extension %s.', module)
def setup(bot: Bothanasius) -> None:
bot.add_cog(Meta(bot))
| 26.068966 | 67 | 0.677249 |
4a232c9d3e8eb8abd77a9cdd5aac9ab0c7b55646 | 5,381 | py | Python | requires.py | openstack/charm-interface-rabbitmq | 383121fc584d2d3bf9d233eba0d3708398a4c468 | [
"Apache-2.0"
] | 12 | 2016-07-07T23:42:18.000Z | 2019-01-28T21:53:38.000Z | requires.py | openstack/charm-interface-rabbitmq | 383121fc584d2d3bf9d233eba0d3708398a4c468 | [
"Apache-2.0"
] | null | null | null | requires.py | openstack/charm-interface-rabbitmq | 383121fc584d2d3bf9d233eba0d3708398a4c468 | [
"Apache-2.0"
] | 1 | 2018-10-11T15:48:50.000Z | 2018-10-11T15:48:50.000Z | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from charms.reactive import RelationBase
from charms.reactive import hook
from charms.reactive import scopes
from charmhelpers.core import hookenv
class RabbitMQRequires(RelationBase):
scope = scopes.GLOBAL
# These remote data fields will be automatically mapped to accessors
# with a basic documentation string provided.
auto_accessors = ['password', 'private-address', 'ssl_port',
'ssl_ca', 'ha_queues', 'ha-vip-only', 'clustered', 'vip']
def vhost(self):
return self.get_local('vhost')
def username(self):
return self.get_local('username')
@hook('{requires:rabbitmq}-relation-joined')
def joined(self):
self.set_state('{relation_name}.connected')
def update_state(self):
if self.base_data_complete():
self.set_state('{relation_name}.available')
if self.ssl_data_complete():
self.set_state('{relation_name}.available.ssl')
else:
self.remove_state('{relation_name}.available.ssl')
else:
self.remove_state('{relation_name}.available')
self.remove_state('{relation_name}.available.ssl')
if not self.rabbitmq_connected_hosts():
self.remove_state('{relation_name}.connected')
@hook('{requires:rabbitmq}-relation-changed')
def changed(self):
self.update_state()
@hook('{requires:rabbitmq}-relation-{broken,departed}')
def departed(self):
self.update_state()
def base_data_complete(self):
"""
        Return True if the base connection data (hostname, vhost, username,
        password) is complete, otherwise False.
"""
data = {
'hostname': self.private_address(),
'vhost': self.vhost(),
'username': self.username(),
'password': self.password(),
}
if all(data.values()):
return True
return False
def ssl_data_complete(self):
"""
        Return True if the SSL connection data (ssl_port, ssl_ca) is complete,
        otherwise False.
"""
data = {
'ssl_port': self.ssl_port(),
'ssl_ca': self.ssl_ca(),
}
if all(data.values()):
return True
return False
def request_access(self, username, vhost, hostname=None):
"""
Request access to vhost for the supplied username.
"""
if not hostname:
try:
hostname = hookenv.network_get_primary_address(
self.conversation().relation_name
)
except NotImplementedError:
hostname = hookenv.unit_private_ip()
relation_info = {
'username': username,
'vhost': vhost,
'private-address': hostname,
}
self.set_local(**relation_info)
self.set_remote(**relation_info)
def configure(self, username, vhost):
"""
DEPRECATED: use request_access instead
Request access to vhost for the supplied username.
"""
self.request_access(username, vhost)
def get_remote_all(self, key, default=None):
"""Return a list of all values presented by remote units for key"""
values = []
for conversation in self.conversations():
for relation_id in conversation.relation_ids:
for unit in hookenv.related_units(relation_id):
value = hookenv.relation_get(key,
unit,
relation_id) or default
if value:
values.append(value)
return list(set(values))
def rabbitmq_connected_hosts(self):
"""Return list of connected rabbit units."""
return self.get_remote_all('private-address')
def rabbitmq_ready_hosts(self):
"""Return list of rabbit units ready to accept client connections."""
hosts = []
for conversation in self.conversations():
for relation_id in conversation.relation_ids:
for unit in hookenv.related_units(relation_id):
rdata = hookenv.relation_get(unit=unit, rid=relation_id)
if rdata.get('password') and rdata.get('private-address'):
hosts.append(rdata['private-address'])
return sorted(hosts)
def rabbitmq_hosts(self):
"""
DEPRECATED: Use rabbitmq_connected_hosts or rabbitmq_ready_hosts
Return list of rabbit units ready to accept client connections."""
return self.rabbitmq_ready_hosts()
def get_ssl_cert(self):
"""Return decoded CA cert from rabbit or None if no CA present"""
if self.ssl_ca():
return base64.b64decode(self.ssl_ca()).decode('utf-8')
return None
| 35.169935 | 79 | 0.60565 |
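# A hedged sketch of the consuming side in a reactive charm layer, assuming the
# relation endpoint is named 'amqp' (so the states above become 'amqp.connected'
# and 'amqp.available'); render_config is a hypothetical helper.
from charms.reactive import set_state, when, when_not


@when('amqp.connected')
@when_not('myapp.amqp.requested')
def request_amqp_access(amqp):
    amqp.request_access(username='myapp', vhost='openstack')  # example values
    set_state('myapp.amqp.requested')


@when('amqp.available')
def write_config(amqp):
    render_config(  # hypothetical helper in the charm
        username=amqp.username(),
        password=amqp.password(),
        vhost=amqp.vhost(),
        hosts=amqp.rabbitmq_ready_hosts(),
    )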
4a232e33c209c63692082e6204eb3fc5384b8e81 | 670 | py | Python | app/tests/tech/plasma_attacker.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | 3 | 2021-05-12T20:32:06.000Z | 2022-02-25T21:29:23.000Z | app/tests/tech/plasma_attacker.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | 109 | 2021-01-10T11:09:11.000Z | 2021-03-25T20:33:13.000Z | app/tests/tech/plasma_attacker.py | systemicsmitty/TI4_battle_sim | b4ed142ff57d19ed50705ba40f83b8b3b7e3a774 | [
"MIT"
] | 1 | 2021-03-25T00:49:12.000Z | 2021-03-25T00:49:12.000Z | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../../..'))
import app.calculator.calculator as calc
from app import testing_helpers
attacker, defender, options, tol = testing_helpers.defaults()
# target source: http://alphamou.se/ti4calc/
target = [3, 86, 11] # target percentages; [tie, attacker, defender]
print("1 Fighter 1 PDS (Plasma Scoring) vs 1 Fighter")
# Units
attacker["fighter"] = 1
attacker["pds"] = 1
defender["fighter"] = 1
# Factions
# Ground Combat
options["ground_combat"] = False
# Options
options["att_plasma"] = True
outcomes = calc.calculate(attacker, defender, options)
testing_helpers.evaluate(outcomes, target, tol)
| 23.103448 | 69 | 0.726866 |
4a232e3ea4158158a05476ec4dae24e39a50c0a2 | 4,146 | py | Python | purity_fb/purity_fb_1dot9/models/support_response.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | purity_fb/purity_fb_1dot9/models/support_response.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | purity_fb/purity_fb_1dot9/models/support_response.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SupportResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[Support]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
SupportResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this SupportResponse.
pagination information, only available in GET requests
:return: The pagination_info of this SupportResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this SupportResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this SupportResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this SupportResponse.
a list of support objects
:return: The items of this SupportResponse.
:rtype: list[Support]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this SupportResponse.
a list of support objects
:param items: The items of this SupportResponse.
:type: list[Support]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SupportResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 26.922078 | 204 | 0.573806 |
4a232fe9564372234ceee65a6374e1979d68bbc5 | 3,234 | py | Python | src/toml_config/settings.py | meaksh/bgzfiltra | 4724bd919c7c440f5388c8718d3d75cd637e51f4 | [
"MIT"
] | 1 | 2021-05-26T15:44:57.000Z | 2021-05-26T15:44:57.000Z | src/toml_config/settings.py | meaksh/bgzfiltra | 4724bd919c7c440f5388c8718d3d75cd637e51f4 | [
"MIT"
] | 1 | 2021-05-19T09:55:27.000Z | 2021-05-19T09:55:27.000Z | src/toml_config/settings.py | meaksh/bgzfiltra | 4724bd919c7c440f5388c8718d3d75cd637e51f4 | [
"MIT"
] | 1 | 2021-05-26T15:48:52.000Z | 2021-05-26T15:48:52.000Z | import os
import sys
import toml
def get_settings():
"""\
Loads TOML settings from one of the defined paths:
./.bgzfiltra.toml
./bgzfiltra.toml
~/.bgzfiltra.toml
~/.config/bgzfiltra.toml
/etc/bgzfiltra.toml
"""
paths = (
"./.bgzfiltra.toml",
"./bgzfiltra.toml",
"~/.bgzfiltra.toml",
"~/.config/bgzfiltra.toml",
"/etc/bgzfiltra.toml",
)
settings = None
for path in paths:
path = os.path.expanduser(path)
if (
os.path.isfile(path)
and not os.path.isdir(path)
and not os.path.islink(path)
):
settings = toml.load(path)
break
if not settings:
print(
"Could not find settings file in any of these locations:\n{}".format(
"\n".join(paths)
)
)
sys.exit(3)
_bugzilla_section_checks(settings)
_questdb_section_checks(settings)
return settings
def _questdb_section_checks(settings):
"""
QuestDB specific setting validations.
"""
if not settings.get("questdb"):
print(
'questdb section missing in settings file:\n[questdb]\nuser = "admin"\n…',
file=sys.stderr,
)
sys.exit(2)
if "user" not in settings["questdb"]:
print(
            'user definition missing in questdb section: user = "admin"',
file=sys.stderr,
)
sys.exit(2)
if "password" not in settings["questdb"]:
print(
'password definition missing in questdb section: password = "mypassword"',
file=sys.stderr,
)
sys.exit(2)
if "host" not in settings["questdb"]:
print(
'host definition missing in questdb section: host = "127.0.0.1"',
file=sys.stderr,
)
sys.exit(2)
if "port" not in settings["questdb"]:
print(
'port definition missing in questdb section: port = "8812"',
file=sys.stderr,
)
sys.exit(2)
if "database" not in settings["questdb"]:
print(
'database definition missing in questdb section: database = "mydb"',
file=sys.stderr,
)
sys.exit(2)
def _bugzilla_section_checks(settings):
"""
Bugzilla specific setting validations.
"""
if not settings.get("bugzilla"):
print(
            'bugzilla section missing in settings file:\n[bugzilla]\nusername = "[email protected]"\npassword = "mypassword"',
file=sys.stderr,
)
sys.exit(2)
if "url" not in settings["bugzilla"]:
print(
'url definition missing in settings file: url = "bugzilla.myurl.com"',
file=sys.stderr,
)
sys.exit(2)
if "username" not in settings["bugzilla"]:
print(
            'username definition missing in settings file: username = "[email protected]"',
file=sys.stderr,
)
sys.exit(2)
if "password" not in settings["bugzilla"]:
print(
'password definition missing in settings file: password = "mypassword"',
file=sys.stderr,
)
sys.exit(2)
| 28.368421 | 120 | 0.544836 |
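# For reference, a minimal settings document that satisfies both validation
# helpers above (assuming this sketch runs alongside them); hostnames and
# credentials are placeholders.
import toml

settings = toml.loads("""
[bugzilla]
url = "bugzilla.example.com"
username = "[email protected]"
password = "mypassword"

[questdb]
user = "admin"
password = "quest"
host = "127.0.0.1"
port = "8812"
database = "qdb"
""")

_bugzilla_section_checks(settings)   # no output, no sys.exit
_questdb_section_checks(settings)    # no output, no sys.exit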
4a233018ca957db3a0b1948e2f587534d9872ba5 | 8,445 | py | Python | aggregation-repulsion-parameter-search.py | fberlinger/UndercoverBot | ce6c1c02114cf52b0afe03af452398f7ccf12d2f | [
"MIT"
] | null | null | null | aggregation-repulsion-parameter-search.py | fberlinger/UndercoverBot | ce6c1c02114cf52b0afe03af452398f7ccf12d2f | [
"MIT"
] | null | null | null | aggregation-repulsion-parameter-search.py | fberlinger/UndercoverBot | ce6c1c02114cf52b0afe03af452398f7ccf12d2f | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.rcParams['figure.figsize'] = [12, 8]
import math
import numpy as np
from queue import Queue, PriorityQueue
import time
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from interaction import Interaction
from environment import Environment
from DelightFish import Fish
from channel import Channel
from observer import Observer
from utils import generate_distortion, generate_fish, generate_replica_fish, generate_all_fish, run_simulation
"""
This file runs tests used to set the parameter for the Delight Fish.
When run, it will output graphs of varying values of K_ar parameter.
"""
# 21 categorical colors. Used for plotting
colors = [
[230/255, 25/255, 75/255, 1.0],
[60/255, 180/255, 75/255, 1.0],
[255/255, 225/255, 25/255, 1.0],
[0/255, 130/255, 200/255, 1.0],
[245/255, 130/255, 48/255, 1.0],
[145/255, 30/255, 180/255, 1.0],
[70/255, 240/255, 240/255, 1.0],
[240/255, 50/255, 230/255, 1.0],
[210/255, 245/255, 60/255, 1.0],
[250/255, 190/255, 190/255, 1.0],
[0/255, 128/255, 128/255, 1.0],
[230/255, 190/255, 255/255, 1.0],
[170/255, 110/255, 40/255, 1.0],
[255/255, 250/255, 200/255, 1.0],
[128/255, 0/255, 0/255, 1.0],
[170/255, 255/255, 195/255, 1.0],
[128/255, 128/255, 0/255, 1.0],
[255/255, 215/255, 180/255, 1.0],
[0/255, 0/255, 128/255, 1.0],
[128/255, 128/255, 128/255, 1.0],
[0/255, 0/255, 0/255, 1.0],
]
def run_trial(run_time, num_fish, initial_spread, k_ar, alpha):
"""
Run a single simulation.
Arguments:
run_time {int} -- Length of time to run simulation
num_fish {int} -- Number of fish in swarm
initial_spread {int} -- Initial spread of fish's randomly initialized positions.
This is essentially the max diameter of the school, where we to encircle it,
at the start of the simulation
k_ar {float} -- Paramter used by delight fish to weight importance of each
neighbor's contribution to final velocity
alpha {float} -- Equilibrium distance for Delight Fish's neighbors
Returns:
fish_xs {list of int lists} - x positions of each fish at each timestep.
fish_ys {list of int lists} - y positions of each fish at each timestep
neighbor_distances {float list} -- the average distance between a fish
and its detected neighbors across all time steps
avg_speeds {float list} -- the average speed of all fish at each time step
"""
run_time = run_time # in seconds
num_fish = num_fish
num_replica_fish = 0
arena_size = 200
arena_center = arena_size / 2.0
initial_spread = initial_spread
fish_pos = initial_spread * np.random.rand(num_fish + num_replica_fish, 2) + arena_center - initial_spread / 2.0
clock_freqs = 1
verbose = False
distortion = generate_distortion(type='none', n=arena_size)
environment = Environment(
node_pos=fish_pos,
distortion=distortion,
prob_type='binary',
noise_magnitude=0,
conn_thres=100,
verbose=verbose
)
interaction = Interaction(environment, verbose=verbose)
channel = Channel(environment)
fish = generate_all_fish(
n_fish=num_fish,
n_replica_fish= num_replica_fish,
channel=channel,
interaction=interaction,
k_coh = 0,
k_ar = k_ar,
alpha = alpha,
lim_neighbors=[0, math.inf],
neighbor_weights=1.0,
fish_max_speeds=9,
clock_freqs=clock_freqs,
verbose=verbose
)
channel.set_nodes(fish)
observer = Observer(fish=fish, environment=environment, channel=channel)
run_simulation(fish=fish, observer=observer, run_time=run_time,
dark=True, white_axis=False, no_legend=True, no_star=False,
show_dist_plot=True, plot=False)
fish_xs = observer.x
fish_ys = observer.y
neighbor_distances = observer.avg_dist
avg_speeds = observer.avg_speed
return fish_xs, fish_ys, neighbor_distances, avg_speeds
def plot_fish(ax, fish_xs, fish_ys, title):
"""
    Generate a visualization of the fish in the simulation.
    Arguments:
ax {matplotlib Axis} -- axis to plot the fish on
fish_xs {list of int lists} -- list of each fish's x position over time
indexed by fish id
fish_ys {list of int lists} -- list of each fish's y position over time.
Indexed by fish id
title {string} -- Title to add to top of plot.
"""
num_fish = len(fish_xs)
for i in range(num_fish):
c = colors[i%20]
if i != 0 and not i % 20:
c = [1.0, 1.0, 1.0, 1.0]
# Plot fish trajectories
ax.plot(fish_xs[i], fish_ys[i], c=c,
linewidth=2.0, alpha=0.4)
ax.scatter(fish_xs[i], fish_ys[i], c=c,
marker='o', alpha=0.2)
# plot fish start
ax.scatter(fish_xs[i][0], fish_ys[i][0], c=c,
marker='>', s=200, alpha=0.5)
# plot fish final
ax.scatter(fish_xs[i][-1], fish_ys[i][-1], c=c,
marker='s', s=200,alpha=1, zorder = 100)
# format black background, white axis
ax.set_facecolor((0, 0, 0))
ax.spines['top'].set_color('black')
ax.spines['right'].set_color('black')
ax.set_title(title)
def plot_dist(ax, distances):
"""
Plot the average distance between a fish and its neighbors
over the course of a simulation.
    Arguments:
        ax {Matplotlib Axis} -- the axis to make the plot on
        distances {float list} -- the average distance at each timestep
"""
ax.plot(range(len(distances)), distances)
ax.scatter(range(len(distances)), distances)
ax.set_xlabel("Time")
ax.set_ylabel("Mean neighbor spacing")
ax.set_title("Mean neighbor spacing over time")
def plot_speed(ax, speeds):
"""
Plot the average speed of a fish over the course of a simulation.
    Arguments:
        ax {Matplotlib Axis} -- the axis to make the plot on
        speeds {float list} -- the average speed at each timestep
"""
ax.plot(range(len(speeds)), speeds)
ax.scatter(range(len(speeds)), speeds)
ax.set_xlabel("Time")
ax.set_ylabel("Mean swarm speed")
ax.set_title("Mean swarm speed over time")
def main():
"""
Search through varying preset parameters for k_ar. You can also
vary alpha, the goal fish distance, to see how the weight parameter's
    effectiveness changes. At the conclusion, the program will output a plot
    for each input with the simulation visualized and average neighbor distance
and average speed over time. There are also two plots for neighbor distance
and average speed with data from all parameter values aggregated into
a single graph. This graph is most useful for deciding on a final value for k_ar
"""
_, (dist_ax, speed_ax) = plt.subplots(2,1)
ks = [0.03, 0.01, 0.005, 0.003]
#alphas = [0.5, 1, 2, 3.5, 4]
fish = 25
alpha = 40
initial_spread = 20
time = 20
for k in ks:
#for alpha in alphas:
xs, ys, neighbors, speeds = run_trial(time, fish, initial_spread, k, alpha)
# create figure for this trial
fig = plt.figure(figsize=(12, 8))
gridsize = (3, 2)
fish_ax = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
trial_dist_ax = plt.subplot2grid(gridsize, (2, 0))
trial_speed_ax = plt.subplot2grid(gridsize, (2, 1))
title = "{} fish, {} initial spread, {} k_ar, {} time, {} alpha".format(fish, initial_spread, k, time, alpha)
plot_fish(fish_ax, xs, ys, title)
plot_dist(trial_dist_ax, neighbors)
plot_speed(trial_speed_ax, speeds)
# Add to all parameter search figure
dist_ax.plot(range(len(neighbors)), neighbors, label = "k = {}, alpha = {}".format(k, alpha))
speed_ax.plot(range(len(speeds)), speeds, label = "k = {}, alpha = {}".format(k, alpha))
# add titles and formatting to stability fig
dist_ax.set_xlabel("Time")
dist_ax.set_ylabel("Mean neighbor spacing")
dist_ax.legend()
dist_ax.set_title("Spacing over time for varying values of k")
speed_ax.set_xlabel("Time")
speed_ax.set_ylabel("Mean swarm speed")
speed_ax.legend()
speed_ax.set_title("Swarm speed over time for varying values of k")
plt.show()
if __name__ == '__main__':
main()
| 33.78 | 117 | 0.644168 |
4a233132ed2e659c2fe215c8b426df73748556e1 | 7,478 | py | Python | PyChan/Core/Commands/Help/help.py | ErnestBytnar/PyChan-Bot | 7ce38547fa85e6f56c1702db5bea4ef2700d9b6f | [
"MIT"
] | null | null | null | PyChan/Core/Commands/Help/help.py | ErnestBytnar/PyChan-Bot | 7ce38547fa85e6f56c1702db5bea4ef2700d9b6f | [
"MIT"
] | null | null | null | PyChan/Core/Commands/Help/help.py | ErnestBytnar/PyChan-Bot | 7ce38547fa85e6f56c1702db5bea4ef2700d9b6f | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from Core.Decorators.decorators import Decorator
from Core.Commands.Settings.Functions.get_server_prefix import GetServerPrefix
class Help(commands.Cog):
"""Class contains help methods
"""
def __init__(self, bot):
"""Constructor method
"""
self.bot = bot
@commands.group(invoke_without_command=True)
@Decorator.pychan_decorator
async def help(self, ctx):
"""Sends message with built-in funtions
:param ctx: the context in which a command is called
:type ctx: discord.ext.commands.Context
"""
prefix = GetServerPrefix.get_server_prefix(self, ctx)
embed = discord.Embed(title='Help',
description=f'Wpisz `{prefix}help <nazwa_komendy>` aby uzyskać więcej informacji.\n'
'\n'
'Dostępne komendy:',
color=discord.Color.dark_purple())
embed.add_field(name='Nauka',
value='`zamiana`, `zamiana+`, `ieee754_32`, `ieee754_64`, `permutacje`, `booth`',
inline=False)
embed.add_field(name='Obraz',
value='`ocr`, `apod`, `qr`',
inline=False)
embed.add_field(name='Tekst',
value='`ciekawostka`',
inline=False)
embed.add_field(name='SKNIKOD',
value='`listaCzlonkow`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='zamiana')
async def zamiana_help(self, ctx):
embed = discord.Embed(title='Zamiana',
description='Zamienia liczbę z dowolnego systemu liczbowego na inny z przedziału <2,16>',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`zamiana <system z którego zamienamy> <do którego zamieniamy> <liczba>`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='zamiana+')
async def zamiana_z_rozpisaniem_help(self, ctx):
embed = discord.Embed(title='Zamiana+',
description='Zamienia liczbę z dowolnego systemu liczbowego na inny z przedziału <2,16>, lecz wraz z rozpisaniem pisemnym zamiany liczb',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`zamiana+ <system z którego zamienamy> <do którego zamieniamy> <liczba>`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='ieee754_32')
async def ieee754_32_help(self, ctx):
embed = discord.Embed(title='IEEE754 32bit',
description='Zamienia dowolną liczbę w systemie dziesiętnym w liczbę binarną przy użyciu zapisu liczby zmiennoprzecinkowej w standarcie IEEE754 32bit',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`ieee754_32 <liczba>`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='ieee754_64')
async def ieee754_64_help(self, ctx):
embed = discord.Embed(title='IEEE754 64bit',
description='Zamienia dowolną liczbę w systemie dziesiętnym w liczbę binarną przy użyciu zapisu liczby zmiennoprzecinkowej w standarcie IEEE754 64bit',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`ieee754_64 <liczba>`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='permutacje')
async def permutacje_help(self, ctx):
embed = discord.Embed(title='permutacje',
description='Szereg funkcji służących do obliczania permutacji',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`permutacje info <permutacja>` - wyświetla informacje o permutacji\n'
'`permutacje losuj <Sn>` - losuje permutacje w podanym Sn\n'
'`permutacje potega <wykładnik> <permutacja>` - Podnosi permutację do potęgi\n'
'`permutacje generuj <numer permutacji> <Sn>` - Generuje permutację na podstawie numeru w porządku leksykograficznym\n',
inline=False)
embed.add_field(name='Aliasy komendy',
value='`permutacje`, `perm`, `p`',
inline=False)
embed.add_field(name='Dodatkowe informacje',
value='Przykłady zapisu permutacji: `<5 2 3 1 4>` lub `(1 5 4)(2)(3)` lub `<5 1 3 2 4>#(4 2 3)#(1 2 5)`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='booth')
async def booth_help(self, ctx):
embed = discord.Embed(title='Algorytm Booth\'a',
description='Mnoży dwie liczby całkowite z użyciem algorytmu Booth\'a i wyświetla kroki.',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`booth <P> <Q>` - gdzie P i Q to liczby całkowite',
inline=False)
await ctx.send(embed=embed)
@help.command(name='ocr')
async def ocr_help(self, ctx):
embed = discord.Embed(title='OCR',
description='Wyciąga tekst z obrazka i wysyła na czat \n'
'Należy pamiętać o dołączeniu obrazka .jpg lub .png do wiadomości',
color=discord.Color.dark_purple())
await ctx.send(embed=embed)
@help.command(name='qr')
async def qr_help(self, ctx):
embed = discord.Embed(title='QR',
description='Tworzy kod QR\n'
'Przyjmuje link do strony jako argument',
color=discord.Color.dark_purple())
embed.add_field(name='Składnia',
value='`qr <link>`',
inline=False)
await ctx.send(embed=embed)
@help.command(name='apod')
async def apod_help(self, ctx):
embed = discord.Embed(title='Astronomy picture of the day',
description='Wysyła astronomiczne zdjęcie lub film dnia wraz z opisem',
color=discord.Color.dark_purple())
await ctx.send(embed=embed)
@help.command(name='listaCzlonkow')
async def get_members_projects_help(self, ctx):
embed = discord.Embed(title='listaCzlonkow',
description='Wysyła plik txt z aktualną listą członków z rolą `Członek` i przypisanymi do nich projektami',
color=discord.Color.dark_purple())
await ctx.send(embed=embed)
@help.command(name='ciekawostka')
async def fun_fact_help(self, ctx):
embed = discord.Embed(title='ciekawostka',
description='Wysyła losową ciekawostkę',
color=discord.Color.dark_purple())
await ctx.send(embed=embed)
| 49.197368 | 181 | 0.555229 |
4a233201ac22c7991fd0361f054605d527b62a96 | 2,572 | py | Python | assignments/models.py | manisharmagarg/oddnary | e2dea772d44d72773aa63c449d4082a9bf07dfe1 | [
"Apache-2.0"
] | null | null | null | assignments/models.py | manisharmagarg/oddnary | e2dea772d44d72773aa63c449d4082a9bf07dfe1 | [
"Apache-2.0"
] | null | null | null | assignments/models.py | manisharmagarg/oddnary | e2dea772d44d72773aa63c449d4082a9bf07dfe1 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from utils.base_model import BaseModel
from utils.upload_location import (
assignment_file_location,
assignment_solution_file_location
)
from courses.models import (
Course, CourseSection,
)
# Create your models here.
User = get_user_model()
class Assignment(BaseModel):
course = models.ForeignKey(Course, related_name='assignments')
course_section = models.ForeignKey(CourseSection, related_name='assignments',
null=True, blank=True)
name = models.CharField(_('name'), max_length=512)
description = models.TextField(_('description'), null=True, blank=True)
index = models.IntegerField(_('index'), default=0, blank=True)
is_active = models.BooleanField(_('is_active'), default=False)
is_deleted = models.BooleanField(_('is_deleted'), default=False)
def __str__(self):
return "{} - {}".format(self.course, self.name)
class Meta:
ordering = ('course', 'index', '-created_at',)
class AssignmentFile(BaseModel):
assignment = models.ForeignKey(Assignment, related_name='files')
name = models.CharField(_('name'), max_length=512)
file = models.FileField(_('file'), max_length=2048, upload_to=assignment_file_location)
description = models.TextField(_('description'), null=True, blank=True)
index = models.IntegerField(_('index'), default=0, blank=True)
is_active = models.BooleanField(_('is_active'), default=False)
is_deleted = models.BooleanField(_('is_deleted'), default=False)
def __str__(self):
return "{} - {}".format(self.assignment, self.name)
class Meta:
ordering = ('assignment', 'index', '-created_at',)
class AssignmentSolution(BaseModel):
assignment = models.ForeignKey(Assignment, related_name='solutions')
user = models.ForeignKey(User, related_name='assignment_solutions',)
comment = models.TextField(null=True, blank=False)
def __str__(self):
return "{}".format(self.assignment)
class AssignmentSolutionFile(BaseModel):
assignment_solution = models.ForeignKey(AssignmentSolution, related_name='files')
name = models.CharField(max_length=128, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
file = models.FileField(_('file'), max_length=2048, upload_to=assignment_solution_file_location)
def __str__(self):
return "{} - {}".format(self.assignment_solution, self.name)
| 37.275362 | 100 | 0.710731 |
4a233449a633487efd89f9568a2194ccd27eaba7 | 4,343 | py | Python | multiworld/core/image_env.py | stevenlin1111/multiworld | 7576a00b884f629ad5de86f6c8a3618770273029 | [
"MIT"
] | null | null | null | multiworld/core/image_env.py | stevenlin1111/multiworld | 7576a00b884f629ad5de86f6c8a3618770273029 | [
"MIT"
] | null | null | null | multiworld/core/image_env.py | stevenlin1111/multiworld | 7576a00b884f629ad5de86f6c8a3618770273029 | [
"MIT"
] | 1 | 2021-08-13T23:47:47.000Z | 2021-08-13T23:47:47.000Z | import cv2
import mujoco_py
import numpy as np
import warnings
from PIL import Image
from gym.spaces import Box, Dict
from multiworld.core.wrapper_env import ProxyEnv
class ImageEnv(ProxyEnv):
def __init__(
self,
wrapped_env,
imsize=84,
init_camera=None,
transpose=False,
grayscale=False,
normalize=False,
):
self.quick_init(locals())
super().__init__(wrapped_env)
self.wrapped_env.hide_goal_markers = True
self.imsize = imsize
self.init_camera = init_camera
self.transpose = transpose
self.grayscale = grayscale
self.normalize = normalize
if grayscale:
self.image_length = self.imsize * self.imsize
else:
self.image_length = 3 * self.imsize * self.imsize
# This is torch format rather than PIL image
self.image_shape = (self.imsize, self.imsize)
# Flattened past image queue
# init camera
if init_camera is not None:
sim = self._wrapped_env.initialize_camera(init_camera)
# viewer = mujoco_py.MjRenderContextOffscreen(sim, device_id=-1)
# init_camera(viewer.cam)
# sim.add_render_context(viewer)
self._render_local = False
self._img_goal = None
img_space = Box(0, 1, (self.image_length,))
spaces = self.wrapped_env.observation_space.spaces
spaces['observation'] = img_space
spaces['desired_goal'] = img_space
spaces['achieved_goal'] = img_space
spaces['image_observation'] = img_space
spaces['image_desired_goal'] = img_space
spaces['image_achieved_goal'] = img_space
self.observation_space = Dict(spaces)
def step(self, action):
obs, reward, done, info = self.wrapped_env.step(action)
new_obs = self._update_obs(obs)
return new_obs, reward, done, info
def reset(self):
obs = self.wrapped_env.reset()
env_state = self.wrapped_env.get_env_state()
self.wrapped_env.set_to_goal(self.wrapped_env.get_goal())
self._img_goal = self._get_flat_img()
self.wrapped_env.set_env_state(env_state)
return self._update_obs(obs)
def _update_obs(self, obs):
img_obs = self._get_flat_img()
obs['image_observation'] = img_obs
obs['image_desired_goal'] = self._img_goal
obs['image_achieved_goal'] = img_obs
obs['observation'] = img_obs
obs['desired_goal'] = self._img_goal
obs['achieved_goal'] = img_obs
return obs
def _get_flat_img(self):
# returns the image as a torch format np array
image_obs = self._wrapped_env.get_image()
if self._render_local:
cv2.imshow('env', image_obs)
cv2.waitKey(1)
if self.grayscale:
image_obs = Image.fromarray(image_obs).convert('L')
image_obs = np.array(image_obs)
if self.normalize:
image_obs = image_obs / 255.0
if self.transpose:
image_obs = image_obs.transpose()
return image_obs.flatten()
def enable_render(self):
self._render_local = True
"""
Multitask functions
"""
def get_goal(self):
goal = self.wrapped_env.get_goal()
goal['desired_goal'] = self._img_goal
goal['image_desired_goal'] = self._img_goal
return goal
def sample_goals(self, batch_size):
if batch_size > 1:
warnings.warn("Sampling goal images is slow")
img_goals = np.zeros((batch_size, self.image_length))
goals = self.wrapped_env.sample_goals(batch_size)
for i in range(batch_size):
goal = self.unbatchify_dict(goals, i)
self.wrapped_env.set_to_goal(goal)
img_goals[i, :] = self._get_flat_img()
goals['desired_goal'] = img_goals
goals['image_desired_goal'] = img_goals
return goals
def compute_rewards(self, achieved_goals, desired_goals, info):
return - np.linalg.norm(achieved_goals - desired_goals, axis=1)
def normalize_image(image):
assert image.dtype == np.uint8
return np.float64(image) / 255.0
def unormalize_image(image):
assert image.dtype != np.uint8
return np.uint8(image * 255.0)
| 33.666667 | 76 | 0.63067 |
4a23346e4c2c74e08459b200c85a6b9db2bf7fa1 | 306 | py | Python | good/users/apps.py | choi010521/good | 7c10ab403f681591d70d22970e2d5f1e3dea9c84 | [
"MIT"
] | null | null | null | good/users/apps.py | choi010521/good | 7c10ab403f681591d70d22970e2d5f1e3dea9c84 | [
"MIT"
] | null | null | null | good/users/apps.py | choi010521/good | 7c10ab403f681591d70d22970e2d5f1e3dea9c84 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "good.users"
verbose_name = _("Users")
def ready(self):
try:
import good.users.signals # noqa F401
except ImportError:
pass
| 21.857143 | 54 | 0.647059 |
4a23355c0c1a10731bca26dd9492fc53ad3e3837 | 70 | py | Python | dltb/tool/face/__init__.py | Petr-By/qtpyvis | 0b9a151ee6b9a56b486c2bece9c1f03414629efc | [
"MIT"
] | 3 | 2017-10-04T14:51:26.000Z | 2017-10-22T09:35:50.000Z | dltb/tool/face/__init__.py | CogSciUOS/DeepLearningToolbox | bf07578b9486d8c48e25df357bc4b9963b513b46 | [
"MIT"
] | 13 | 2017-11-26T10:05:00.000Z | 2018-03-11T14:08:40.000Z | dltb/tool/face/__init__.py | CogSciUOS/DeepLearningToolbox | bf07578b9486d8c48e25df357bc4b9963b513b46 | [
"MIT"
] | 2 | 2017-09-24T21:39:42.000Z | 2017-10-04T15:29:54.000Z | from .detector import Detector
from .landmarks import FacialLandmarks
| 23.333333 | 38 | 0.857143 |
4a23364cde0e837c9fb3ec57044e267dce0235d7 | 384 | py | Python | leetcode/__init__.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | leetcode/__init__.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | leetcode/__init__.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | from typing import Dict, List, Tuple # noqa
from .convert import main # noqa
from .etc.imported import * # noqa
from .etc.list_node import * # noqa
from .etc.tree_node import * # noqa
null = None # noqa
try:
import pkg_resources
__version__ = pkg_resources.get_distribution(__package__).version
except pkg_resources.DistributionNotFound:
__version__ = "Unknown"
| 24 | 69 | 0.739583 |
4a23385ed652576d7b9b4ad8ad3191e06514406b | 11,220 | py | Python | utils/routine.py | CreeperLin/PyTorch_ProxylessNAS | 9ab6d7bf284d31196f7cf985d29c62aa1c172c8c | [
"MIT"
] | 9 | 2019-11-06T07:19:09.000Z | 2021-09-17T02:44:49.000Z | utils/routine.py | Kyrie-Zhao/PyTorch_ProxylessNAS | 9ab6d7bf284d31196f7cf985d29c62aa1c172c8c | [
"MIT"
] | 2 | 2021-04-24T22:23:23.000Z | 2021-04-24T22:24:18.000Z | utils/routine.py | Kyrie-Zhao/PyTorch_ProxylessNAS | 9ab6d7bf284d31196f7cf985d29c62aa1c172c8c | [
"MIT"
] | 4 | 2020-03-01T06:47:41.000Z | 2021-09-17T02:44:51.000Z | # -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
import utils
from visualize import plot
from utils.profiling import tprof
import genotypes as gt
from models.nas_modules import NASModule
def save_checkpoint(out_dir, model, w_optim, a_optim, lr_scheduler, epoch, logger):
try:
save_path = os.path.join(out_dir, 'chkpt_%03d.pt' % (epoch+1))
torch.save({
'model': model.state_dict(),
'arch': NASModule.nasmod_state_dict(),
'w_optim': w_optim.state_dict(),
'a_optim': a_optim.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
}, save_path)
logger.info("Saved checkpoint to: %s" % save_path)
except Exception as e:
logger.error("Save checkpoint failed: "+str(e))
def save_genotype(out_dir, genotype, epoch, logger):
try:
logger.info("genotype = {}".format(genotype))
save_path = os.path.join(out_dir, 'gene_{:03d}.gt'.format(epoch+1))
gt.to_file(genotype, save_path)
logger.info("Saved genotype to: %s" % save_path)
except:
logger.error("Save genotype failed")
def search(out_dir, chkpt_path, w_train_loader, a_train_loader, model, arch, writer, logger, device, config):
valid_loader = a_train_loader
w_optim = utils.get_optim(model.weights(), config.w_optim)
a_optim = utils.get_optim(model.alphas(), config.a_optim)
init_epoch = -1
if chkpt_path is not None:
logger.info("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model.load_state_dict(checkpoint['model'])
NASModule.nasmod_load_state_dict(checkpoint['arch'])
w_optim.load_state_dict(checkpoint['w_optim'])
a_optim.load_state_dict(checkpoint['a_optim'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
init_epoch = checkpoint['epoch']
else:
logger.info("Starting new training run")
architect = arch(config, model)
# warmup training loop
logger.info('begin warmup training')
try:
if config.warmup_epochs > 0:
warmup_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.warmup_epochs, eta_min=config.w_optim.lr_min)
last_epoch = 0
else:
last_epoch = -1
tot_epochs = config.warmup_epochs
for epoch in itertools.count(init_epoch+1):
if epoch == tot_epochs: break
lr = warmup_lr_scheduler.get_lr()[0]
# training
train(w_train_loader, None, model, writer, logger, architect, w_optim, a_optim, lr, epoch, tot_epochs, device, config)
# validation
cur_step = (epoch+1) * len(w_train_loader)
top1 = validate(valid_loader, model, writer, logger, epoch, tot_epochs, cur_step, device, config)
warmup_lr_scheduler.step()
print("")
except KeyboardInterrupt:
print('skipped')
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_optim.lr_min, last_epoch=last_epoch)
    if lr_scheduler_state is not None:
        lr_scheduler.load_state_dict(lr_scheduler_state)
save_checkpoint(out_dir, model, w_optim, a_optim, lr_scheduler, init_epoch, logger)
save_genotype(out_dir, model.genotype(), init_epoch, logger)
# training loop
logger.info('begin w/a training')
best_top1 = 0.
tot_epochs = config.epochs
for epoch in itertools.count(init_epoch+1):
if epoch == tot_epochs: break
lr = lr_scheduler.get_lr()[0]
model.print_alphas(logger)
# training
train(w_train_loader, a_train_loader, model, writer, logger, architect, w_optim, a_optim, lr, epoch, tot_epochs, device, config)
# validation
cur_step = (epoch+1) * len(w_train_loader)
top1 = validate(valid_loader, model, writer, logger, epoch, tot_epochs, cur_step, device, config)
# genotype
genotype = model.genotype()
save_genotype(out_dir, genotype, epoch, logger)
# genotype as image
if config.plot:
for i, dag in enumerate(model.dags()):
plot_path = os.path.join(config.plot_path, "EP{:02d}".format(epoch+1))
caption = "Epoch {} - DAG {}".format(epoch+1, i)
plot(genotype.dag[i], dag, plot_path + "-dag_{}".format(i), caption)
if best_top1 < top1:
best_top1 = top1
best_genotype = genotype
if config.save_freq != 0 and epoch % config.save_freq == 0:
save_checkpoint(out_dir, model, w_optim, a_optim, lr_scheduler, epoch, logger)
lr_scheduler.step()
print("")
logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
logger.info("Best Genotype = {}".format(best_genotype))
tprof.stat_acc('model_'+NASModule.get_device()[0])
gt.to_file(best_genotype, os.path.join(out_dir, 'best.gt'))
def augment(out_dir, chkpt_path, train_loader, valid_loader, model, writer, logger, device, config):
w_optim = utils.get_optim(model.weights(), config.w_optim)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.epochs, eta_min=config.w_optim.lr_min)
init_epoch = -1
if chkpt_path is not None:
logger.info("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model.load_state_dict(checkpoint['model'])
w_optim.load_state_dict(checkpoint['w_optim'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
init_epoch = checkpoint['epoch']
else:
logger.info("Starting new training run")
logger.info("Model params count: {:.3f} M, size: {:.3f} MB".format(utils.param_size(model), utils.param_count(model)))
# training loop
logger.info('begin training')
best_top1 = 0.
tot_epochs = config.epochs
for epoch in itertools.count(init_epoch+1):
if epoch == tot_epochs: break
drop_prob = config.drop_path_prob * epoch / tot_epochs
model.drop_path_prob(drop_prob)
lr = lr_scheduler.get_lr()[0]
# training
train(train_loader, None, model, writer, logger, None, w_optim, None, lr, epoch, tot_epochs, device, config)
lr_scheduler.step()
# validation
cur_step = (epoch+1) * len(train_loader)
top1 = validate(valid_loader, model, writer, logger, epoch, tot_epochs, cur_step, device, config)
# save
if best_top1 < top1:
best_top1 = top1
is_best = True
else:
is_best = False
if config.save_freq != 0 and epoch % config.save_freq == 0:
save_checkpoint(out_dir, model, w_optim, None, lr_scheduler, epoch, logger)
print("")
logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
tprof.stat_acc('model_'+NASModule.get_device()[0])
def train(train_loader, valid_loader, model, writer, logger, architect, w_optim, a_optim, lr, epoch, tot_epochs, device, config):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
cur_step = epoch*len(train_loader)
writer.add_scalar('train/lr', lr, cur_step)
model.train()
if not valid_loader is None:
tr_ratio = len(train_loader) // len(valid_loader)
val_iter = iter(valid_loader)
eta_m = utils.ETAMeter(tot_epochs, epoch, len(train_loader))
eta_m.start()
for step, (trn_X, trn_y) in enumerate(train_loader):
trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True)
N = trn_X.size(0)
# phase 1. child network step (w)
w_optim.zero_grad()
tprof.timer_start('train')
loss, logits = model.loss_logits(trn_X, trn_y, config.aux_weight)
tprof.timer_stop('train')
loss.backward()
# gradient clipping
if config.w_grad_clip > 0:
nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)
w_optim.step()
# phase 2. architect step (alpha)
if not valid_loader is None and step % tr_ratio == 0:
try:
val_X, val_y = next(val_iter)
except:
val_iter = iter(valid_loader)
val_X, val_y = next(val_iter)
val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
tprof.timer_start('arch')
architect.step(trn_X, trn_y, val_X, val_y, lr, w_optim, a_optim)
tprof.timer_stop('arch')
prec1, prec5 = utils.accuracy(logits, trn_y, topk=(1, 5))
losses.update(loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step !=0 and step % config.print_freq == 0 or step == len(train_loader)-1:
eta = eta_m.step(step)
logger.info(
"Train: [{:2d}/{}] Step {:03d}/{:03d} LR {:.3f} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%}) | ETA: {eta}".format(
epoch+1, tot_epochs, step, len(train_loader)-1, lr, losses=losses,
top1=top1, top5=top5, eta=utils.format_time(eta)))
writer.add_scalar('train/loss', loss.item(), cur_step)
writer.add_scalar('train/top1', prec1.item(), cur_step)
writer.add_scalar('train/top5', prec5.item(), cur_step)
cur_step += 1
logger.info("Train: [{:2d}/{}] Final Prec@1 {:.4%}".format(epoch+1, tot_epochs, top1.avg))
tprof.stat_acc('model_'+NASModule.get_device()[0])
tprof.print_stat('train')
tprof.print_stat('arch')
def validate(valid_loader, model, writer, logger, epoch, tot_epochs, cur_step, device, config):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
losses = utils.AverageMeter()
model.eval()
with torch.no_grad():
for step, (X, y) in enumerate(valid_loader):
X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
N = X.size(0)
tprof.timer_start('validate')
loss, logits = model.loss_logits(X, y, config.aux_weight)
tprof.timer_stop('validate')
prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
losses.update(loss.item(), N)
top1.update(prec1.item(), N)
top5.update(prec5.item(), N)
if step !=0 and step % config.print_freq == 0 or step == len(valid_loader)-1:
logger.info(
"Valid: [{:2d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
"Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
epoch+1, tot_epochs, step, len(valid_loader)-1, losses=losses,
top1=top1, top5=top5))
writer.add_scalar('val/loss', losses.avg, cur_step)
writer.add_scalar('val/top1', top1.avg, cur_step)
writer.add_scalar('val/top5', top5.avg, cur_step)
logger.info("Valid: [{:2d}/{}] Final Prec@1 {:.4%}".format(epoch+1, tot_epochs, top1.avg))
tprof.print_stat('validate')
return top1.avg
| 39.230769 | 136 | 0.62139 |
4a23385fc80157946bb482b847cbba060ebb0e30 | 2,255 | py | Python | BotData/ConfiG.py | SOUFIANEZAZA/fitness_1 | 9fcd1ed5135f34a4528f79f714538987750b0351 | [
"Unlicense"
] | 1 | 2020-11-02T12:23:37.000Z | 2020-11-02T12:23:37.000Z | BotData/ConfiG.py | SOUFIANEZAZA/fitness_1 | 9fcd1ed5135f34a4528f79f714538987750b0351 | [
"Unlicense"
] | null | null | null | BotData/ConfiG.py | SOUFIANEZAZA/fitness_1 | 9fcd1ed5135f34a4528f79f714538987750b0351 | [
"Unlicense"
] | null | null | null | from MokupData.mokupfun import *
from FunctionData.somefun import *
# After Login Displaying Options
def Option():
KMFHEADER()
OPTIONLIST()
while True:
try:
usrInp = input("\u001b[32m[+]\u001b[0m Enter Input:-")
            # if/elif chain for option selection
            if usrInp == "1":
                clear()
                from BotData.MainBot import ig_teamhunter
                ig_teamhunter()
            elif usrInp == "2":
                clear()
                from BotData.MainBot import ig_masslooker
                ig_masslooker()
            elif usrInp == "3":
                clear()
                from BotData.MainBot import ig_rehashtag
                ig_rehashtag()
            elif usrInp == "4":
                clear()
                from BotData.MainBot import ig_feedliker
                ig_feedliker()
            elif usrInp == "5":
                clear()
                from BotData.MainBot import ig_inshackle
                ig_inshackle()
            elif usrInp == "6":
                clear()
                from BotData.MainBot import ig_directmessage
                ig_directmessage()
            elif usrInp == "7":
                clear()
                from BotData.MainBot import ig_nonfollowers
                ig_nonfollowers()
            elif usrInp == "8":
                clear()
                from BotData.MainBot import ig_ProfileScraper
                ig_ProfileScraper()
            elif usrInp == "9":
                clear()
                KMFHEADER()
                INSTRUCTIONS()
                DONATE()
                BackInp = input("\u001b[32m[+]\u001b[0m Enter Key to Main Menu ")
                if BackInp == "":
                    clear()
                    Option()
                else:
                    clear()
                    Option()
            elif usrInp == "10":
                clear()
                KMFHEADER()
                LOGOUT()
                from BotData.MainBot import LogOut
                LogOut()
            else:
                from BotData.MainBot import ELSE_BOT
                ELSE_BOT()
except KeyboardInterrupt:
break
clear()
Option() | 28.1875 | 81 | 0.439911 |
4a2338732f1ec949d037c26165b39cb80c317142 | 623 | py | Python | services/alice/speech-kit/server.py | IlyaBerezhnoy/sensor_server | e2ab512d3596be6516ae9b4de28a8e0f767cce90 | [
"MIT"
] | 1 | 2018-05-11T13:34:07.000Z | 2018-05-11T13:34:07.000Z | services/alice/speech-kit/server.py | IlyaBerezhnoy/sensor_server | e2ab512d3596be6516ae9b4de28a8e0f767cce90 | [
"MIT"
] | null | null | null | services/alice/speech-kit/server.py | IlyaBerezhnoy/sensor_server | e2ab512d3596be6516ae9b4de28a8e0f767cce90 | [
"MIT"
] | null | null | null | #!/bin/python
from flask import Flask, jsonify, request
import subprocess
import os
app = Flask(__name__)
text = ""
greetings = "'/play' and '/replay'\n"
@app.route('/')
def index():
return greetings
@app.route('/play', methods=['POST'])
def play():
global text
text = request.data.decode('utf-8')
os.system('./play.sh "' + text + '"')
return jsonify({'played': True, "text" : text}), 201
@app.route('/replay')
def replay():
global text
os.system('./replay.sh')
return jsonify({'replayed': True, "text" : text}), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| 20.766667 | 58 | 0.616372 |
4a233885d9fd6f4a47eb3900370d6070acedb2fc | 2,487 | py | Python | marmot/representations/wmt_representation_generator.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 19 | 2015-08-21T13:06:37.000Z | 2021-07-26T09:56:29.000Z | marmot/representations/wmt_representation_generator.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 36 | 2015-01-13T13:01:07.000Z | 2016-06-22T06:59:59.000Z | marmot/representations/wmt_representation_generator.py | qe-team/marmot | 38e09ff1d0a3025a6b7edeaaf6086ed047ec45ff | [
"0BSD"
] | 8 | 2015-12-11T16:41:47.000Z | 2019-04-08T16:28:40.000Z | import os
from nltk import word_tokenize
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class WMTRepresentationGenerator(RepresentationGenerator):
def _write_to_file(self, filename, lofl):
a_file = open(filename, 'w')
for sentence in lofl:
a_file.write('%s\n' % (' '.join([w.encode('utf-8') for w in sentence])))
a_file.close()
def _parse_wmt_to_text(self, wmt_file, wmt_source_file, tmp_dir, persist=False):
# parse source files
source_sents = {}
for line in open(wmt_source_file):
str_num = line.decode('utf-8').strip().split('\t')
source_sents[str_num[0]] = word_tokenize(str_num[1])
# parse target file and write new source, target, and tag files
target, source, tags = [], [], []
cur_num = None
cur_sent, cur_tags = [], []
for line in open(wmt_file):
chunks = line[:-1].decode('utf-8').split('\t')
if chunks[0] != cur_num:
if len(cur_sent) > 0:
# check that the sentence is in source
if cur_num in source_sents:
source.append(source_sents[cur_num])
target.append(cur_sent)
tags.append(cur_tags)
cur_sent = []
cur_tags = []
cur_num = chunks[0]
cur_sent.append(chunks[2])
cur_tags.append(chunks[5])
# last sentence
if len(cur_sent) > 0 and cur_num in source_sents:
source.append(source_sents[cur_num])
target.append(cur_sent)
tags.append(cur_tags)
if persist:
tmp_dir = mk_tmp_dir(tmp_dir)
target_file = tmp_dir+'/'+os.path.basename(wmt_file)+'.target'
tags_file = tmp_dir+'/'+os.path.basename(wmt_file)+'.tags'
source_file = tmp_dir+'/'+os.path.basename(wmt_source_file)+'.txt'
self._write_to_file(target_file, target)
self._write_to_file(source_file, source)
self._write_to_file(tags_file, tags)
return {'target': target, 'source': source, 'tags': tags}
def __init__(self, tg_file, src_file, tmp_dir=None, persist=False):
self.data = self._parse_wmt_to_text(tg_file, src_file, tmp_dir, persist=persist)
def generate(self):
return self.data
| 38.859375 | 88 | 0.592682 |
4a23390822658c8601855d27ba75629f3e6d82ed | 58,226 | py | Python | statsmodels/tsa/statespace/dynamic_factor.py | AustinJAdams/statsmodels | e6632b6466dc7eb7062df0f26a6888da0e67e347 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/tsa/statespace/dynamic_factor.py | AustinJAdams/statsmodels | e6632b6466dc7eb7062df0f26a6888da0e67e347 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/tsa/statespace/dynamic_factor.py | AustinJAdams/statsmodels | e6632b6466dc7eb7062df0f26a6888da0e67e347 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Dynamic factor model
Author: Chad Fulton
License: Simplified-BSD
"""
from warnings import warn
from collections import OrderedDict
import numpy as np
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
is_invertible, prepare_exog,
constrain_stationary_univariate, unconstrain_stationary_univariate,
constrain_stationary_multivariate, unconstrain_stationary_multivariate
)
from statsmodels.multivariate.pca import PCA
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ValueWarning
import statsmodels.base.wrapper as wrap
from statsmodels.compat.pandas import Appender
class DynamicFactor(MLEModel):
r"""
Dynamic factor model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors for the observation equation, shaped
nobs x k_exog.
k_factors : int
The number of unobserved factors.
factor_order : int
The order of the vector autoregression followed by the factors.
error_cov_type : {'scalar', 'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the observation error term,
where "unstructured" puts no restrictions on the matrix, "diagonal"
requires it to be any diagonal matrix (uncorrelated errors), and
"scalar" requires it to be a scalar times the identity matrix. Default
is "diagonal".
error_order : int, optional
The order of the vector autoregression followed by the observation
        error component. Default is 0, corresponding to white noise errors.
error_var : bool, optional
Whether or not to model the errors jointly via a vector autoregression,
rather than as individual autoregressions. Has no effect unless
`error_order` is set. Default is False.
enforce_stationarity : bool, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
exog : array_like, optional
Array of exogenous regressors for the observation equation, shaped
nobs x k_exog.
k_factors : int
The number of unobserved factors.
factor_order : int
The order of the vector autoregression followed by the factors.
    error_cov_type : {'scalar', 'diagonal', 'unstructured'}
        The structure of the covariance matrix of the observation error term,
        where "unstructured" puts no restrictions on the matrix, "diagonal"
        requires it to be any diagonal matrix (uncorrelated errors), and
        "scalar" requires it to be a scalar times the identity matrix.
error_order : int
The order of the vector autoregression followed by the observation
error component.
error_var : bool
Whether or not to model the errors jointly via a vector autoregression,
rather than as individual autoregressions. Has no effect unless
`error_order` is set.
enforce_stationarity : bool, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
Notes
-----
The dynamic factor model considered here is in the so-called static form,
and is specified:
.. math::
y_t & = \Lambda f_t + B x_t + u_t \\
f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + \eta_t \\
        u_t & = C_1 u_{t-1} + \dots + C_q u_{t-q} + \varepsilon_t
where there are `k_endog` observed series and `k_factors` unobserved
factors. Thus :math:`y_t` is a `k_endog` x 1 vector and :math:`f_t` is a
`k_factors` x 1 vector.
:math:`x_t` are optional exogenous vectors, shaped `k_exog` x 1.
:math:`\eta_t` and :math:`\varepsilon_t` are white noise error terms. In
order to identify the factors, :math:`Var(\eta_t) = I`. Denote
:math:`Var(\varepsilon_t) \equiv \Sigma`.
Options related to the unobserved factors:
- `k_factors`: this is the dimension of the vector :math:`f_t`, above.
To exclude factors completely, set `k_factors = 0`.
- `factor_order`: this is the number of lags to include in the factor
evolution equation, and corresponds to :math:`p`, above. To have static
factors, set `factor_order = 0`.
Options related to the observation error term :math:`u_t`:
- `error_order`: the number of lags to include in the error evolution
equation; corresponds to :math:`q`, above. To have white noise errors,
set `error_order = 0` (this is the default).
- `error_cov_type`: this controls the form of the covariance matrix
:math:`\Sigma`. If it is "dscalar", then :math:`\Sigma = \sigma^2 I`. If
it is "diagonal", then
:math:`\Sigma = \text{diag}(\sigma_1^2, \dots, \sigma_n^2)`. If it is
"unstructured", then :math:`\Sigma` is any valid variance / covariance
matrix (i.e. symmetric and positive definite).
- `error_var`: this controls whether or not the errors evolve jointly
according to a VAR(q), or individually according to separate AR(q)
processes. In terms of the formulation above, if `error_var = False`,
      then the matrices :math:`C_i` are diagonal, otherwise they are general
VAR matrices.
References
----------
.. [*] Lütkepohl, Helmut. 2007.
New Introduction to Multiple Time Series Analysis.
Berlin: Springer.
"""
def __init__(self, endog, k_factors, factor_order, exog=None,
error_order=0, error_var=False, error_cov_type='diagonal',
enforce_stationarity=True, **kwargs):
# Model properties
self.enforce_stationarity = enforce_stationarity
# Factor-related properties
self.k_factors = k_factors
self.factor_order = factor_order
# Error-related properties
self.error_order = error_order
self.error_var = error_var and error_order > 0
self.error_cov_type = error_cov_type
# Exogenous data
(self.k_exog, exog) = prepare_exog(exog)
# Note: at some point in the future might add state regression, as in
# SARIMAX.
self.mle_regression = self.k_exog > 0
# We need to have an array or pandas at this point
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog, order='C')
# Save some useful model orders, internally used
k_endog = endog.shape[1] if endog.ndim > 1 else 1
self._factor_order = max(1, self.factor_order) * self.k_factors
self._error_order = self.error_order * k_endog
# Calculate the number of states
k_states = self._factor_order
k_posdef = self.k_factors
if self.error_order > 0:
k_states += self._error_order
k_posdef += k_endog
if k_states == 0:
k_states = 1
k_posdef = 1
# Test for non-multivariate endog
if k_endog < 2:
raise ValueError('The dynamic factors model is only valid for'
' multivariate time series.')
# Test for too many factors
if self.k_factors >= k_endog:
raise ValueError('Number of factors must be less than the number'
' of endogenous variables.')
# Test for invalid error_cov_type
if self.error_cov_type not in ['scalar', 'diagonal', 'unstructured']:
raise ValueError('Invalid error covariance matrix type'
' specification.')
# By default, initialize as stationary
kwargs.setdefault('initialization', 'stationary')
# Initialize the state space model
super(DynamicFactor, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Set as time-varying model if we have exog
if self.k_exog > 0:
self.ssm._time_invariant = False
# Initialize the components
self.parameters = OrderedDict()
self._initialize_loadings()
self._initialize_exog()
self._initialize_error_cov()
self._initialize_factor_transition()
self._initialize_error_transition()
self.k_params = sum(self.parameters.values())
# Cache parameter vector slices
def _slice(key, offset):
length = self.parameters[key]
param_slice = np.s_[offset:offset + length]
offset += length
return param_slice, offset
offset = 0
self._params_loadings, offset = _slice('factor_loadings', offset)
self._params_exog, offset = _slice('exog', offset)
self._params_error_cov, offset = _slice('error_cov', offset)
self._params_factor_transition, offset = (
_slice('factor_transition', offset))
self._params_error_transition, offset = (
_slice('error_transition', offset))
# Update _init_keys attached by super
self._init_keys += ['k_factors', 'factor_order', 'error_order',
'error_var', 'error_cov_type',
'enforce_stationarity'] + list(kwargs.keys())
def _initialize_loadings(self):
# Initialize the parameters
self.parameters['factor_loadings'] = self.k_endog * self.k_factors
# Setup fixed components of state space matrices
if self.error_order > 0:
start = self._factor_order
end = self._factor_order + self.k_endog
self.ssm['design', :, start:end] = np.eye(self.k_endog)
# Setup indices of state space matrices
self._idx_loadings = np.s_['design', :, :self.k_factors]
def _initialize_exog(self):
# Initialize the parameters
self.parameters['exog'] = self.k_exog * self.k_endog
# If we have exog effects, then the obs intercept needs to be
# time-varying
if self.k_exog > 0:
self.ssm['obs_intercept'] = np.zeros((self.k_endog, self.nobs))
# Setup indices of state space matrices
self._idx_exog = np.s_['obs_intercept', :self.k_endog, :]
def _initialize_error_cov(self):
if self.error_cov_type == 'scalar':
self._initialize_error_cov_diagonal(scalar=True)
elif self.error_cov_type == 'diagonal':
self._initialize_error_cov_diagonal(scalar=False)
elif self.error_cov_type == 'unstructured':
self._initialize_error_cov_unstructured()
def _initialize_error_cov_diagonal(self, scalar=False):
# Initialize the parameters
self.parameters['error_cov'] = 1 if scalar else self.k_endog
# Setup fixed components of state space matrices
# Setup indices of state space matrices
k_endog = self.k_endog
k_factors = self.k_factors
idx = np.diag_indices(k_endog)
if self.error_order > 0:
matrix = 'state_cov'
idx = (idx[0] + k_factors, idx[1] + k_factors)
else:
matrix = 'obs_cov'
self._idx_error_cov = (matrix,) + idx
def _initialize_error_cov_unstructured(self):
# Initialize the parameters
k_endog = self.k_endog
self.parameters['error_cov'] = int(k_endog * (k_endog + 1) / 2)
# Setup fixed components of state space matrices
# Setup indices of state space matrices
self._idx_lower_error_cov = np.tril_indices(self.k_endog)
if self.error_order > 0:
start = self.k_factors
end = self.k_factors + self.k_endog
self._idx_error_cov = (
np.s_['state_cov', start:end, start:end])
else:
self._idx_error_cov = np.s_['obs_cov', :, :]
def _initialize_factor_transition(self):
order = self.factor_order * self.k_factors
k_factors = self.k_factors
# Initialize the parameters
self.parameters['factor_transition'] = (
self.factor_order * self.k_factors**2)
# Setup fixed components of state space matrices
# VAR(p) for factor transition
if self.k_factors > 0:
if self.factor_order > 0:
self.ssm['transition', k_factors:order, :order - k_factors] = (
np.eye(order - k_factors))
self.ssm['selection', :k_factors, :k_factors] = np.eye(k_factors)
# Identification requires constraining the state covariance to an
# identity matrix
self.ssm['state_cov', :k_factors, :k_factors] = np.eye(k_factors)
# Setup indices of state space matrices
self._idx_factor_transition = np.s_['transition', :k_factors, :order]
def _initialize_error_transition(self):
# Initialize the appropriate situation
if self.error_order == 0:
self._initialize_error_transition_white_noise()
else:
# Generic setup fixed components of state space matrices
# VAR(q) for error transition
# (in the individual AR case, we still have the VAR(q) companion
# matrix structure, but force the coefficient matrices to be
# diagonal)
k_endog = self.k_endog
k_factors = self.k_factors
_factor_order = self._factor_order
_error_order = self._error_order
_slice = np.s_['selection',
_factor_order:_factor_order + k_endog,
k_factors:k_factors + k_endog]
self.ssm[_slice] = np.eye(k_endog)
_slice = np.s_[
'transition',
_factor_order + k_endog:_factor_order + _error_order,
_factor_order:_factor_order + _error_order - k_endog]
self.ssm[_slice] = np.eye(_error_order - k_endog)
# Now specialized setups
if self.error_var:
self._initialize_error_transition_var()
else:
self._initialize_error_transition_individual()
def _initialize_error_transition_white_noise(self):
# Initialize the parameters
self.parameters['error_transition'] = 0
# No fixed components of state space matrices
# Setup indices of state space matrices (just an empty slice)
self._idx_error_transition = np.s_['transition', 0:0, 0:0]
def _initialize_error_transition_var(self):
k_endog = self.k_endog
_factor_order = self._factor_order
_error_order = self._error_order
# Initialize the parameters
self.parameters['error_transition'] = _error_order * k_endog
# Fixed components already setup above
# Setup indices of state space matrices
# Here we want to set all of the elements of the coefficient matrices,
# the same as in a VAR specification
self._idx_error_transition = np.s_[
'transition',
_factor_order:_factor_order + k_endog,
_factor_order:_factor_order + _error_order]
def _initialize_error_transition_individual(self):
k_endog = self.k_endog
_error_order = self._error_order
# Initialize the parameters
self.parameters['error_transition'] = _error_order
# Fixed components already setup above
# Setup indices of state space matrices
# Here we want to set only the diagonal elements of the coefficient
# matrices, and we want to set them in order by equation, not by
# matrix (i.e. set the first element of the first matrix's diagonal,
# then set the first element of the second matrix's diagonal, then...)
# The basic setup is a tiled list of diagonal indices, one for each
# coefficient matrix
idx = np.tile(np.diag_indices(k_endog), self.error_order)
# Now we need to shift the rows down to the correct location
row_shift = self._factor_order
# And we need to shift the columns in an increasing way
col_inc = self._factor_order + np.repeat(
[i * k_endog for i in range(self.error_order)], k_endog)
idx[0] += row_shift
idx[1] += col_inc
# Make a copy (without the row shift) so that we can easily get the
# diagonal parameters back out of a generic coefficients matrix array
idx_diag = idx.copy()
idx_diag[0] -= row_shift
idx_diag[1] -= self._factor_order
idx_diag = idx_diag[:, np.lexsort((idx_diag[1], idx_diag[0]))]
self._idx_error_diag = (idx_diag[0], idx_diag[1])
        # Finally, we want to fill in the entries in the correct order, which
# is to say we want to fill in lexicographically, first by row then by
# column
idx = idx[:, np.lexsort((idx[1], idx[0]))]
self._idx_error_transition = np.s_['transition', idx[0], idx[1]]
def clone(self, endog, exog=None, **kwargs):
return self._clone_from_init_kwds(endog, exog, **kwargs)
@property
def _res_classes(self):
return {'fit': (DynamicFactorResults, DynamicFactorResultsWrapper)}
@property
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
endog = self.endog.copy()
# 1. Factor loadings (estimated via PCA)
if self.k_factors > 0:
# Use principal components + OLS as starting values
res_pca = PCA(endog, ncomp=self.k_factors)
mod_ols = OLS(endog, res_pca.factors)
res_ols = mod_ols.fit()
            # Using OLS params for the loadings tends to give higher starting
# log-likelihood.
params[self._params_loadings] = res_ols.params.T.ravel()
# params[self._params_loadings] = res_pca.loadings.ravel()
            # However, using res_ols.resid tends to cause non-invertible
# starting VAR coefficients for error VARs
# endog = res_ols.resid
endog = endog - np.dot(res_pca.factors, res_pca.loadings.T)
# 2. Exog (OLS on residuals)
if self.k_exog > 0:
mod_ols = OLS(endog, exog=self.exog)
res_ols = mod_ols.fit()
# In the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...
params[self._params_exog] = res_ols.params.T.ravel()
endog = res_ols.resid
# 3. Factors (VAR on res_pca.factors)
stationary = True
if self.k_factors > 1 and self.factor_order > 0:
# 3a. VAR transition (OLS on factors estimated via PCA)
mod_factors = VAR(res_pca.factors)
res_factors = mod_factors.fit(maxlags=self.factor_order, ic=None,
trend='nc')
# Save the parameters
params[self._params_factor_transition] = (
res_factors.params.T.ravel())
# Test for stationarity
coefficient_matrices = (
params[self._params_factor_transition].reshape(
self.k_factors * self.factor_order, self.k_factors
).T
).reshape(self.k_factors, self.k_factors, self.factor_order).T
stationary = is_invertible([1] + list(-coefficient_matrices))
elif self.k_factors > 0 and self.factor_order > 0:
# 3b. AR transition
Y = res_pca.factors[self.factor_order:]
X = lagmat(res_pca.factors, self.factor_order, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
stationary = is_invertible(np.r_[1, -params_ar.squeeze()])
params[self._params_factor_transition] = params_ar[:, 0]
# Check for stationarity
if not stationary and self.enforce_stationarity:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# 4. Errors
if self.error_order == 0:
if self.error_cov_type == 'scalar':
params[self._params_error_cov] = endog.var(axis=0).mean()
elif self.error_cov_type == 'diagonal':
params[self._params_error_cov] = endog.var(axis=0)
elif self.error_cov_type == 'unstructured':
cov_factor = np.diag(endog.std(axis=0))
params[self._params_error_cov] = (
cov_factor[self._idx_lower_error_cov].ravel())
else:
mod_errors = VAR(endog)
res_errors = mod_errors.fit(maxlags=self.error_order, ic=None,
trend='nc')
# Test for stationarity
coefficient_matrices = (
np.array(res_errors.params.T).ravel().reshape(
self.k_endog * self.error_order, self.k_endog
).T
).reshape(self.k_endog, self.k_endog, self.error_order).T
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary and self.enforce_stationarity:
raise ValueError('Non-stationary starting error autoregressive'
' parameters found with'
' `enforce_stationarity` set to True.')
# Get the error autoregressive parameters
if self.error_var:
params[self._params_error_transition] = (
np.array(res_errors.params.T).ravel())
else:
# In the case of individual autoregressions, extract just the
# diagonal elements
params[self._params_error_transition] = (
res_errors.params.T[self._idx_error_diag])
# Get the error covariance parameters
if self.error_cov_type == 'scalar':
params[self._params_error_cov] = (
res_errors.sigma_u.diagonal().mean())
elif self.error_cov_type == 'diagonal':
params[self._params_error_cov] = res_errors.sigma_u.diagonal()
elif self.error_cov_type == 'unstructured':
try:
cov_factor = np.linalg.cholesky(res_errors.sigma_u)
except np.linalg.LinAlgError:
cov_factor = np.eye(res_errors.sigma_u.shape[0]) * (
res_errors.sigma_u.diagonal().mean()**0.5)
params[self._params_error_cov] = (
cov_factor[self._idx_lower_error_cov].ravel())
return params
@property
def param_names(self):
param_names = []
endog_names = self.endog_names
# 1. Factor loadings
param_names += [
'loading.f%d.%s' % (j+1, endog_names[i])
for i in range(self.k_endog)
for j in range(self.k_factors)
]
# 2. Exog
# Recall these are in the form: beta.x1.y1, beta.x2.y1, beta.x1.y2, ...
param_names += [
'beta.%s.%s' % (self.exog_names[j], endog_names[i])
for i in range(self.k_endog)
for j in range(self.k_exog)
]
# 3. Error covariances
if self.error_cov_type == 'scalar':
param_names += ['sigma2']
elif self.error_cov_type == 'diagonal':
param_names += [
'sigma2.%s' % endog_names[i]
for i in range(self.k_endog)
]
elif self.error_cov_type == 'unstructured':
param_names += [
('sqrt.var.%s' % endog_names[i] if i == j else
'sqrt.cov.%s.%s' % (endog_names[j], endog_names[i]))
for i in range(self.k_endog)
for j in range(i+1)
]
# 4. Factor transition VAR
param_names += [
'L%d.f%d.f%d' % (i+1, k+1, j+1)
for j in range(self.k_factors)
for i in range(self.factor_order)
for k in range(self.k_factors)
]
# 5. Error transition VAR
if self.error_var:
param_names += [
'L%d.e(%s).e(%s)' % (i+1, endog_names[k], endog_names[j])
for j in range(self.k_endog)
for i in range(self.error_order)
for k in range(self.k_endog)
]
else:
param_names += [
'L%d.e(%s).e(%s)' % (i+1, endog_names[j], endog_names[j])
for j in range(self.k_endog)
for i in range(self.error_order)
]
return param_names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
evaluation.
Notes
-----
Constrains the factor transition to be stationary and variances to be
positive.
"""
unconstrained = np.array(unconstrained, ndmin=1)
dtype = unconstrained.dtype
constrained = np.zeros(unconstrained.shape, dtype=dtype)
# 1. Factor loadings
# The factor loadings do not need to be adjusted
constrained[self._params_loadings] = (
unconstrained[self._params_loadings])
# 2. Exog
# The regression coefficients do not need to be adjusted
constrained[self._params_exog] = (
unconstrained[self._params_exog])
# 3. Error covariances
# If we have variances, force them to be positive
if self.error_cov_type in ['scalar', 'diagonal']:
constrained[self._params_error_cov] = (
unconstrained[self._params_error_cov]**2)
# Otherwise, nothing needs to be done
elif self.error_cov_type == 'unstructured':
constrained[self._params_error_cov] = (
unconstrained[self._params_error_cov])
# 4. Factor transition VAR
# VAR transition: optionally force to be stationary
if self.enforce_stationarity and self.factor_order > 0:
# Transform the parameters
unconstrained_matrices = (
unconstrained[self._params_factor_transition].reshape(
self.k_factors, self._factor_order))
# This is always an identity matrix, but because the transform
# done prior to update (where the ssm representation matrices
# change), it may be complex
cov = self.ssm['state_cov', :self.k_factors, :self.k_factors].real
coefficient_matrices, variance = (
constrain_stationary_multivariate(unconstrained_matrices, cov))
constrained[self._params_factor_transition] = (
coefficient_matrices.ravel())
else:
constrained[self._params_factor_transition] = (
unconstrained[self._params_factor_transition])
# 5. Error transition VAR
# VAR transition: optionally force to be stationary
if self.enforce_stationarity and self.error_order > 0:
# Joint VAR specification
if self.error_var:
unconstrained_matrices = (
unconstrained[self._params_error_transition].reshape(
self.k_endog, self._error_order))
start = self.k_factors
end = self.k_factors + self.k_endog
cov = self.ssm['state_cov', start:end, start:end].real
coefficient_matrices, variance = (
constrain_stationary_multivariate(
unconstrained_matrices, cov))
constrained[self._params_error_transition] = (
coefficient_matrices.ravel())
# Separate AR specifications
else:
coefficients = (
unconstrained[self._params_error_transition].copy())
for i in range(self.k_endog):
start = i * self.error_order
end = (i + 1) * self.error_order
coefficients[start:end] = constrain_stationary_univariate(
coefficients[start:end])
constrained[self._params_error_transition] = coefficients
else:
constrained[self._params_error_transition] = (
unconstrained[self._params_error_transition])
return constrained
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer.
Parameters
----------
constrained : array_like
Array of constrained parameters used in likelihood evaluation, to
be transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
"""
constrained = np.array(constrained, ndmin=1)
dtype = constrained.dtype
unconstrained = np.zeros(constrained.shape, dtype=dtype)
# 1. Factor loadings
# The factor loadings do not need to be adjusted
unconstrained[self._params_loadings] = (
constrained[self._params_loadings])
# 2. Exog
# The regression coefficients do not need to be adjusted
unconstrained[self._params_exog] = (
constrained[self._params_exog])
# 3. Error covariances
# If we have variances, force them to be positive
if self.error_cov_type in ['scalar', 'diagonal']:
unconstrained[self._params_error_cov] = (
constrained[self._params_error_cov]**0.5)
# Otherwise, nothing needs to be done
elif self.error_cov_type == 'unstructured':
unconstrained[self._params_error_cov] = (
constrained[self._params_error_cov])
# 3. Factor transition VAR
# VAR transition: optionally force to be stationary
if self.enforce_stationarity and self.factor_order > 0:
# Transform the parameters
constrained_matrices = (
constrained[self._params_factor_transition].reshape(
self.k_factors, self._factor_order))
cov = self.ssm['state_cov', :self.k_factors, :self.k_factors].real
coefficient_matrices, variance = (
unconstrain_stationary_multivariate(
constrained_matrices, cov))
unconstrained[self._params_factor_transition] = (
coefficient_matrices.ravel())
else:
unconstrained[self._params_factor_transition] = (
constrained[self._params_factor_transition])
# 5. Error transition VAR
# VAR transition: optionally force to be stationary
if self.enforce_stationarity and self.error_order > 0:
# Joint VAR specification
if self.error_var:
constrained_matrices = (
constrained[self._params_error_transition].reshape(
self.k_endog, self._error_order))
start = self.k_factors
end = self.k_factors + self.k_endog
cov = self.ssm['state_cov', start:end, start:end].real
coefficient_matrices, variance = (
unconstrain_stationary_multivariate(
constrained_matrices, cov))
unconstrained[self._params_error_transition] = (
coefficient_matrices.ravel())
# Separate AR specifications
else:
coefficients = (
constrained[self._params_error_transition].copy())
for i in range(self.k_endog):
start = i * self.error_order
end = (i + 1) * self.error_order
coefficients[start:end] = (
unconstrain_stationary_univariate(
coefficients[start:end]))
unconstrained[self._params_error_transition] = coefficients
else:
unconstrained[self._params_error_transition] = (
constrained[self._params_error_transition])
return unconstrained
def _validate_can_fix_params(self, param_names):
super(DynamicFactor, self)._validate_can_fix_params(param_names)
ix = np.cumsum(list(self.parameters.values()))[:-1]
(_, _, _, factor_transition_names, error_transition_names) = [
arr.tolist() for arr in np.array_split(self.param_names, ix)]
if self.enforce_stationarity and self.factor_order > 0:
if self.k_factors > 1 or self.factor_order > 1:
fix_all = param_names.issuperset(factor_transition_names)
fix_any = (
len(param_names.intersection(factor_transition_names)) > 0)
if fix_any and not fix_all:
raise ValueError(
'Cannot fix individual factor transition parameters'
' when `enforce_stationarity=True`. In this case,'
' must either fix all factor transition parameters or'
' none.')
if self.enforce_stationarity and self.error_order > 0:
if self.error_var or self.error_order > 1:
fix_all = param_names.issuperset(error_transition_names)
fix_any = (
len(param_names.intersection(error_transition_names)) > 0)
if fix_any and not fix_all:
raise ValueError(
'Cannot fix individual error transition parameters'
' when `enforce_stationarity=True`. In this case,'
' must either fix all error transition parameters or'
' none.')
def update(self, params, transformed=True, includes_fixed=False,
complex_step=False):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : bool, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
Notes
-----
Let `n = k_endog`, `m = k_factors`, and `p = factor_order`. Then the
`params` vector has length
:math:`[n \times m] + [n] + [m^2 \times p]`.
It is expanded in the following way:
- The first :math:`n \times m` parameters fill out the factor loading
matrix, starting from the [0,0] entry and then proceeding along rows.
These parameters are not modified in `transform_params`.
        - The next :math:`n` parameters provide the variances of the
          observation errors. They fill in the diagonal of the
          observation covariance matrix, and are constrained to be positive by
          `transform_params`.
- The next :math:`m^2 \times p` parameters are used to create the `p`
coefficient matrices for the vector autoregression describing the
factor transition. They are transformed in `transform_params` to
enforce stationarity of the VAR(p). They are placed so as to make
the transition matrix a companion matrix for the VAR. In particular,
we assume that the first :math:`m^2` parameters fill the first
coefficient matrix (starting at [0,0] and filling along rows), the
second :math:`m^2` parameters fill the second matrix, etc.
"""
params = self.handle_params(params, transformed=transformed,
includes_fixed=includes_fixed)
# 1. Factor loadings
# Update the design / factor loading matrix
self.ssm[self._idx_loadings] = (
params[self._params_loadings].reshape(self.k_endog, self.k_factors)
)
# 2. Exog
if self.k_exog > 0:
exog_params = params[self._params_exog].reshape(
self.k_endog, self.k_exog).T
self.ssm[self._idx_exog] = np.dot(self.exog, exog_params).T
# 3. Error covariances
if self.error_cov_type in ['scalar', 'diagonal']:
self.ssm[self._idx_error_cov] = (
params[self._params_error_cov])
elif self.error_cov_type == 'unstructured':
error_cov_lower = np.zeros((self.k_endog, self.k_endog),
dtype=params.dtype)
error_cov_lower[self._idx_lower_error_cov] = (
params[self._params_error_cov])
self.ssm[self._idx_error_cov] = (
np.dot(error_cov_lower, error_cov_lower.T))
# 4. Factor transition VAR
self.ssm[self._idx_factor_transition] = (
params[self._params_factor_transition].reshape(
self.k_factors, self.factor_order * self.k_factors))
# 5. Error transition VAR
if self.error_var:
self.ssm[self._idx_error_transition] = (
params[self._params_error_transition].reshape(
self.k_endog, self._error_order))
else:
self.ssm[self._idx_error_transition] = (
params[self._params_error_transition])
class DynamicFactorResults(MLEResults):
"""
Class to hold results from fitting an DynamicFactor model.
Parameters
----------
model : DynamicFactor instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the DynamicFactor model
instance.
coefficient_matrices_var : array
Array containing autoregressive lag polynomial coefficient matrices,
ordered from lowest degree to highest.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type=None,
**kwargs):
super(DynamicFactorResults, self).__init__(model, params,
filter_results, cov_type,
**kwargs)
self.df_resid = np.inf # attribute required for wald tests
self.specification = Bunch(**{
# Model properties
'k_endog': self.model.k_endog,
'enforce_stationarity': self.model.enforce_stationarity,
# Factor-related properties
'k_factors': self.model.k_factors,
'factor_order': self.model.factor_order,
# Error-related properties
'error_order': self.model.error_order,
'error_var': self.model.error_var,
'error_cov_type': self.model.error_cov_type,
# Other properties
'k_exog': self.model.k_exog
})
# Polynomials / coefficient matrices
self.coefficient_matrices_var = None
if self.model.factor_order > 0:
ar_params = (
np.array(self.params[self.model._params_factor_transition]))
k_factors = self.model.k_factors
factor_order = self.model.factor_order
self.coefficient_matrices_var = (
ar_params.reshape(k_factors * factor_order, k_factors).T
).reshape(k_factors, k_factors, factor_order).T
self.coefficient_matrices_error = None
if self.model.error_order > 0:
ar_params = (
np.array(self.params[self.model._params_error_transition]))
k_endog = self.model.k_endog
error_order = self.model.error_order
if self.model.error_var:
self.coefficient_matrices_error = (
ar_params.reshape(k_endog * error_order, k_endog).T
).reshape(k_endog, k_endog, error_order).T
else:
mat = np.zeros((k_endog, k_endog * error_order))
mat[self.model._idx_error_diag] = ar_params
self.coefficient_matrices_error = (
mat.T.reshape(error_order, k_endog, k_endog))
@property
def factors(self):
"""
Estimates of unobserved factors
Returns
-------
out : Bunch
Has the following attributes shown in Notes.
Notes
-----
The output is a bunch of the following format:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
# If present, level is always the first component of the state vector
out = None
spec = self.specification
if spec.k_factors > 0:
offset = 0
end = spec.k_factors
res = self.filter_results
out = Bunch(
filtered=res.filtered_state[offset:end],
filtered_cov=res.filtered_state_cov[offset:end, offset:end],
smoothed=None, smoothed_cov=None,
offset=offset)
if self.smoothed_state is not None:
out.smoothed = self.smoothed_state[offset:end]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.smoothed_state_cov[offset:end, offset:end])
return out
@cache_readonly
def coefficients_of_determination(self):
"""
Coefficients of determination (:math:`R^2`) from regressions of
individual estimated factors on endogenous variables.
Returns
-------
coefficients_of_determination : array
A `k_endog` x `k_factors` array, where
`coefficients_of_determination[i, j]` represents the :math:`R^2`
value from a regression of factor `j` and a constant on endogenous
variable `i`.
Notes
-----
Although it can be difficult to interpret the estimated factor loadings
and factors, it is often helpful to use the coefficients of
determination from univariate regressions to assess the importance of
each factor in explaining the variation in each endogenous variable.
In models with many variables and factors, this can sometimes lend
interpretation to the factors (for example sometimes one factor will
load primarily on real variables and another on nominal variables).
See Also
--------
plot_coefficients_of_determination
"""
from statsmodels.tools import add_constant
spec = self.specification
coefficients = np.zeros((spec.k_endog, spec.k_factors))
which = 'filtered' if self.smoothed_state is None else 'smoothed'
for i in range(spec.k_factors):
exog = add_constant(self.factors[which][i])
for j in range(spec.k_endog):
endog = self.filter_results.endog[j]
coefficients[j, i] = OLS(endog, exog).fit().rsquared
return coefficients
def plot_coefficients_of_determination(self, endog_labels=None,
fig=None, figsize=None):
"""
Plot the coefficients of determination
Parameters
----------
endog_labels : bool, optional
Whether or not to label the endogenous variables along the x-axis
of the plots. Default is to include labels if there are 5 or fewer
endogenous variables.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a `k_factors` x 1 plot grid. The `i`th plot shows a bar plot
of the coefficients of determination associated with factor `i`. The
endogenous variables are arranged along the x-axis according to their
position in the `endog` array.
See Also
--------
coefficients_of_determination
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
spec = self.specification
# Should we label endogenous variables?
if endog_labels is None:
endog_labels = spec.k_endog <= 5
# Plot the coefficients of determination
coefficients_of_determination = self.coefficients_of_determination
plot_idx = 1
locations = np.arange(spec.k_endog)
for coeffs in coefficients_of_determination.T:
# Create the new axis
ax = fig.add_subplot(spec.k_factors, 1, plot_idx)
ax.set_ylim((0, 1))
ax.set(title='Factor %i' % plot_idx, ylabel=r'$R^2$')
bars = ax.bar(locations, coeffs)
if endog_labels:
width = bars[0].get_width()
ax.xaxis.set_ticks(locations + width / 2)
ax.xaxis.set_ticklabels(self.model.endog_names)
else:
ax.set(xlabel='Endogenous variables')
ax.xaxis.set_ticks([])
plot_idx += 1
return fig
def get_prediction(self, start=None, end=None, dynamic=False, index=None,
exog=None, **kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting,
            i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting,
            i.e., the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : bool, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
start = self.model._index[0]
# Handle end (e.g. date)
_start, _end, _out_of_sample, prediction_index = (
self.model._get_prediction_index(start, end, index, silent=True))
# Handle exogenous parameters
if _out_of_sample and self.model.k_exog > 0:
# Create a new faux VARMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError('Out-of-sample forecasting in a model'
' with a regression component requires'
' additional exogenous values via the'
' `exog` argument.')
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
try:
exog = exog.reshape(required_exog_shape)
except ValueError:
raise ValueError('Provided exogenous values are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_exog_shape),
str(exog.shape)))
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
# TODO replace with init_kwds or specification or similar
model = DynamicFactor(
endog,
k_factors=self.model.k_factors,
factor_order=self.model.factor_order,
exog=exog,
error_order=self.model.error_order,
error_var=self.model.error_var,
error_cov_type=self.model.error_cov_type,
enforce_stationarity=self.model.enforce_stationarity
)
model.update(self.params)
# Set the kwargs with the update time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == 'obs':
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
if len(mat.shape) == 2:
kwargs[name] = mat[:, -_out_of_sample:]
else:
kwargs[name] = mat[:, :, -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn('Exogenous array provided to predict, but additional data not'
' required. `exog` argument ignored.', ValueWarning)
return super(DynamicFactorResults, self).get_prediction(
start=start, end=end, dynamic=dynamic, index=index, exog=exog,
**kwargs)
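    # A minimal forecasting sketch, assuming `res` is a fitted results instance for a
    # model with exogenous regressors (hypothetical names). Out-of-sample prediction
    # needs exactly `out_of_sample` x `k_exog` new exog values, as described above:
    #
    #     steps = 4
    #     new_exog = np.ones((steps, res.model.k_exog))
    #     pred = res.get_prediction(start=res.nobs, end=res.nobs + steps - 1, exog=new_exog)
    #     pred.predicted_mean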
@Appender(MLEResults.summary.__doc__)
def summary(self, alpha=.05, start=None, separate_params=True):
from statsmodels.iolib.summary import summary_params
spec = self.specification
# Create the model name
model_name = []
if spec.k_factors > 0:
if spec.factor_order > 0:
model_type = ('DynamicFactor(factors=%d, order=%d)' %
(spec.k_factors, spec.factor_order))
else:
model_type = 'StaticFactor(factors=%d)' % spec.k_factors
model_name.append(model_type)
if spec.k_exog > 0:
model_name.append('%d regressors' % spec.k_exog)
else:
model_name.append('SUR(%d regressors)' % spec.k_exog)
if spec.error_order > 0:
error_type = 'VAR' if spec.error_var else 'AR'
model_name.append('%s(%d) errors' % (error_type, spec.error_order))
summary = super(DynamicFactorResults, self).summary(
alpha=alpha, start=start, model_name=model_name,
display_params=not separate_params
)
if separate_params:
indices = np.arange(len(self.params))
def make_table(self, mask, title, strip_end=True):
res = (self, self.params[mask], self.bse[mask],
self.zvalues[mask], self.pvalues[mask],
self.conf_int(alpha)[mask])
param_names = [
'.'.join(name.split('.')[:-1]) if strip_end else name
for name in
np.array(self.data.param_names)[mask].tolist()
]
return summary_params(res, yname=None, xname=param_names,
alpha=alpha, use_t=False, title=title)
k_endog = self.model.k_endog
k_exog = self.model.k_exog
k_factors = self.model.k_factors
factor_order = self.model.factor_order
_factor_order = self.model._factor_order
_error_order = self.model._error_order
# Add parameter tables for each endogenous variable
loading_indices = indices[self.model._params_loadings]
loading_masks = []
exog_indices = indices[self.model._params_exog]
exog_masks = []
for i in range(k_endog):
# 1. Factor loadings
# Recall these are in the form:
# 'loading.f1.y1', 'loading.f2.y1', 'loading.f1.y2', ...
loading_mask = (
loading_indices[i * k_factors:(i + 1) * k_factors])
loading_masks.append(loading_mask)
# 2. Exog
# Recall these are in the form:
# beta.x1.y1, beta.x2.y1, beta.x1.y2, ...
exog_mask = exog_indices[i * k_exog:(i + 1) * k_exog]
exog_masks.append(exog_mask)
# Create the table
mask = np.concatenate([loading_mask, exog_mask])
title = "Results for equation %s" % self.model.endog_names[i]
table = make_table(self, mask, title)
summary.tables.append(table)
# Add parameter tables for each factor
factor_indices = indices[self.model._params_factor_transition]
factor_masks = []
if factor_order > 0:
for i in range(k_factors):
start = i * _factor_order
factor_mask = factor_indices[start: start + _factor_order]
factor_masks.append(factor_mask)
# Create the table
title = "Results for factor equation f%d" % (i+1)
table = make_table(self, factor_mask, title)
summary.tables.append(table)
# Add parameter tables for error transitions
error_masks = []
if spec.error_order > 0:
error_indices = indices[self.model._params_error_transition]
for i in range(k_endog):
if spec.error_var:
start = i * _error_order
end = (i + 1) * _error_order
else:
start = i * spec.error_order
end = (i + 1) * spec.error_order
error_mask = error_indices[start:end]
error_masks.append(error_mask)
# Create the table
title = ("Results for error equation e(%s)" %
self.model.endog_names[i])
table = make_table(self, error_mask, title)
summary.tables.append(table)
# Error covariance terms
error_cov_mask = indices[self.model._params_error_cov]
table = make_table(self, error_cov_mask,
"Error covariance matrix", strip_end=False)
summary.tables.append(table)
# Add a table for all other parameters
masks = []
for m in (loading_masks, exog_masks, factor_masks,
error_masks, [error_cov_mask]):
m = np.array(m).flatten()
if len(m) > 0:
masks.append(m)
masks = np.concatenate(masks)
inverse_mask = np.array(list(set(indices).difference(set(masks))))
if len(inverse_mask) > 0:
table = make_table(self, inverse_mask, "Other parameters",
strip_end=False)
summary.tables.append(table)
return summary
class DynamicFactorResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(DynamicFactorResultsWrapper, # noqa:E305
DynamicFactorResults)
| 41.47151 | 79 | 0.595559 |
4a233a8c3744fbf4f8a464eeeb200c5eff585c30 | 17,673 | py | Python | python_modules/dagster/dagster/core/execution/resources_init.py | metinsenturk/dagster | 3560475d0d99a319632625683002931e502f32ed | ["Apache-2.0"] | null | null | null | python_modules/dagster/dagster/core/execution/resources_init.py | metinsenturk/dagster | 3560475d0d99a319632625683002931e502f32ed | ["Apache-2.0"] | null | null | null | python_modules/dagster/dagster/core/execution/resources_init.py | metinsenturk/dagster | 3560475d0d99a319632625683002931e502f32ed | ["Apache-2.0"] | null | null | null |
import inspect
from collections import deque
from typing import AbstractSet, Any, Callable, Deque, Dict, Optional, cast
from dagster import check
from dagster.core.decorator_utils import get_function_params
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.resource import (
ResourceDefinition,
ScopedResourcesBuilder,
is_context_provided,
)
from dagster.core.errors import (
DagsterInvariantViolationError,
DagsterResourceFunctionError,
DagsterUserCodeExecutionError,
user_code_error_boundary,
)
from dagster.core.events import DagsterEvent
from dagster.core.execution.plan.inputs import (
StepInput,
UnresolvedCollectStepInput,
UnresolvedMappedStepInput,
)
from dagster.core.execution.plan.plan import ExecutionPlan, StepHandleUnion
from dagster.core.execution.plan.step import ExecutionStep
from dagster.core.instance import DagsterInstance
from dagster.core.log_manager import DagsterLogManager
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.system_config.objects import ResourceConfig
from dagster.core.utils import toposort
from dagster.utils import EventGenerationManager, ensure_gen
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.timing import format_duration, time_execution_scope
from .context.init import InitResourceContext
def resource_initialization_manager(
resource_defs: Dict[str, ResourceDefinition],
resource_configs: Dict[str, ResourceConfig],
log_manager: DagsterLogManager,
execution_plan: Optional[ExecutionPlan],
pipeline_run: Optional[PipelineRun],
resource_keys_to_init: Optional[AbstractSet[str]],
instance: Optional[DagsterInstance],
emit_persistent_events: Optional[bool],
pipeline_def_for_backwards_compat: Optional[PipelineDefinition],
):
generator = resource_initialization_event_generator(
resource_defs=resource_defs,
resource_configs=resource_configs,
log_manager=log_manager,
execution_plan=execution_plan,
pipeline_run=pipeline_run,
resource_keys_to_init=resource_keys_to_init,
instance=instance,
emit_persistent_events=emit_persistent_events,
pipeline_def_for_backwards_compat=pipeline_def_for_backwards_compat,
)
return EventGenerationManager(generator, ScopedResourcesBuilder)
def resolve_resource_dependencies(resource_defs):
"""Generates a dictionary that maps resource key to resource keys it requires for initialization"""
resource_dependencies = {
key: resource_def.required_resource_keys for key, resource_def in resource_defs.items()
}
return resource_dependencies
def get_dependencies(resource_name, resource_deps):
"""Get all resources that must be initialized before resource_name can be initialized.
    Uses DFS to get all required dependencies of a particular resource. If dependencies are
    cyclic, a DagsterInvariantViolationError is raised.
"""
path = set() # resources we are currently checking the dependencies of
reqd_resources = set()
# adds dependencies for a given resource key to reqd_resources
def _get_deps_helper(resource_key):
path.add(resource_key)
for reqd_resource_key in resource_deps[resource_key]:
if reqd_resource_key in path:
raise DagsterInvariantViolationError(
'Resource key "{key}" transitively depends on itself.'.format(
key=reqd_resource_key
)
)
_get_deps_helper(reqd_resource_key)
path.remove(resource_key)
reqd_resources.add(resource_key)
_get_deps_helper(resource_name)
return set(reqd_resources)
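# A minimal sketch of how the two helpers above combine (hypothetical resource names,
# not taken from a real repository definition):
#
#     deps = resolve_resource_dependencies({"warehouse": warehouse_def, "creds": creds_def})
#     # e.g. {"warehouse": {"creds"}, "creds": set()}
#     get_dependencies("warehouse", deps)   # -> {"warehouse", "creds"}
#
# A cyclic mapping such as {"a": {"b"}, "b": {"a"}} raises DagsterInvariantViolationError.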
def _core_resource_initialization_event_generator(
resource_defs: Dict[str, ResourceDefinition],
resource_configs: Dict[str, ResourceConfig],
resource_log_manager: DagsterLogManager,
resource_managers: Deque[EventGenerationManager],
execution_plan: Optional[ExecutionPlan],
pipeline_run: Optional[PipelineRun],
resource_keys_to_init: Optional[AbstractSet[str]],
instance: Optional[DagsterInstance],
emit_persistent_events: Optional[bool],
pipeline_def_for_backwards_compat: Optional[PipelineDefinition],
):
pipeline_name = None
contains_generator = False
if emit_persistent_events:
check.invariant(
pipeline_run and execution_plan,
"If emit_persistent_events is enabled, then pipeline_run and execution_plan must be provided",
)
pipeline_name = cast(PipelineRun, pipeline_run).pipeline_name
resource_keys_to_init = check.opt_set_param(resource_keys_to_init, "resource_keys_to_init")
resource_instances: Dict[str, "InitializedResource"] = {}
resource_init_times = {}
try:
if emit_persistent_events and resource_keys_to_init:
yield DagsterEvent.resource_init_start(
cast(str, pipeline_name),
cast(ExecutionPlan, execution_plan),
resource_log_manager,
resource_keys_to_init,
)
resource_dependencies = resolve_resource_dependencies(resource_defs)
for level in toposort(resource_dependencies):
for resource_name in level:
resource_def = resource_defs[resource_name]
                if resource_name not in resource_keys_to_init:
continue
resource_fn = cast(Callable[[InitResourceContext], Any], resource_def.resource_fn)
resources = ScopedResourcesBuilder(resource_instances).build(
resource_def.required_resource_keys
)
resource_context = InitResourceContext(
resource_def=resource_def,
resource_config=resource_configs[resource_name].config,
pipeline_run=pipeline_run,
# Add tags with information about the resource
log_manager=resource_log_manager.with_tags(
resource_name=resource_name,
resource_fn_name=str(resource_fn.__name__),
),
resources=resources,
instance=instance,
pipeline_def_for_backwards_compat=pipeline_def_for_backwards_compat,
)
manager = single_resource_generation_manager(
resource_context, resource_name, resource_def
)
for event in manager.generate_setup_events():
if event:
yield event
initialized_resource = check.inst(manager.get_object(), InitializedResource)
resource_instances[resource_name] = initialized_resource.resource
resource_init_times[resource_name] = initialized_resource.duration
contains_generator = contains_generator or initialized_resource.is_generator
resource_managers.append(manager)
if emit_persistent_events and resource_keys_to_init:
yield DagsterEvent.resource_init_success(
cast(str, pipeline_name),
cast(ExecutionPlan, execution_plan),
resource_log_manager,
resource_instances,
resource_init_times,
)
yield ScopedResourcesBuilder(resource_instances, contains_generator)
except DagsterUserCodeExecutionError as dagster_user_error:
# Can only end up in this state if we attempt to initialize a resource, so
# resource_keys_to_init cannot be empty
if emit_persistent_events:
yield DagsterEvent.resource_init_failure(
cast(str, pipeline_name),
cast(ExecutionPlan, execution_plan),
resource_log_manager,
resource_keys_to_init,
serializable_error_info_from_exc_info(dagster_user_error.original_exc_info),
)
raise dagster_user_error
def resource_initialization_event_generator(
resource_defs: Dict[str, ResourceDefinition],
resource_configs: Dict[str, ResourceConfig],
log_manager: DagsterLogManager,
execution_plan: Optional[ExecutionPlan],
pipeline_run: Optional[PipelineRun],
resource_keys_to_init: Optional[AbstractSet[str]],
instance: Optional[DagsterInstance],
emit_persistent_events: Optional[bool],
pipeline_def_for_backwards_compat: Optional[PipelineDefinition],
):
check.inst_param(log_manager, "log_manager", DagsterLogManager)
resource_keys_to_init = check.opt_set_param(
resource_keys_to_init, "resource_keys_to_init", of_type=str
)
check.opt_inst_param(execution_plan, "execution_plan", ExecutionPlan)
check.opt_inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.opt_inst_param(instance, "instance", DagsterInstance)
if execution_plan and execution_plan.step_handle_for_single_step_plans():
step = execution_plan.get_step(
cast(
StepHandleUnion,
cast(ExecutionPlan, execution_plan).step_handle_for_single_step_plans(),
)
)
resource_log_manager = log_manager.with_tags(**cast(ExecutionStep, step).logging_tags)
else:
resource_log_manager = log_manager
generator_closed = False
resource_managers: Deque[EventGenerationManager] = deque()
try:
yield from _core_resource_initialization_event_generator(
resource_defs=resource_defs,
resource_configs=resource_configs,
resource_log_manager=resource_log_manager,
resource_managers=resource_managers,
execution_plan=execution_plan,
pipeline_run=pipeline_run,
resource_keys_to_init=resource_keys_to_init,
instance=instance,
emit_persistent_events=emit_persistent_events,
pipeline_def_for_backwards_compat=pipeline_def_for_backwards_compat,
)
except GeneratorExit:
# Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed
# (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).
generator_closed = True
raise
finally:
if not generator_closed:
error = None
while len(resource_managers) > 0:
manager = resource_managers.pop()
try:
yield from manager.generate_teardown_events()
except DagsterUserCodeExecutionError as dagster_user_error:
error = dagster_user_error
if error and emit_persistent_events:
yield DagsterEvent.resource_teardown_failure(
cast(PipelineRun, pipeline_run).pipeline_name,
cast(ExecutionPlan, execution_plan),
resource_log_manager,
resource_keys_to_init,
serializable_error_info_from_exc_info(error.original_exc_info),
)
class InitializedResource:
"""Utility class to wrap the untyped resource object emitted from the user-supplied
resource function. Used for distinguishing from the framework-yielded events in an
`EventGenerationManager`-wrapped event stream.
"""
def __init__(self, obj, duration, is_generator):
self.resource = obj
self.duration = duration
self.is_generator = is_generator
def single_resource_generation_manager(context, resource_name, resource_def):
generator = single_resource_event_generator(context, resource_name, resource_def)
return EventGenerationManager(generator, InitializedResource)
def single_resource_event_generator(context, resource_name, resource_def):
try:
msg_fn = lambda: "Error executing resource_fn on ResourceDefinition {name}".format(
name=resource_name
)
with user_code_error_boundary(
DagsterResourceFunctionError, msg_fn, log_manager=context.log
):
try:
with time_execution_scope() as timer_result:
resource_or_gen = (
resource_def.resource_fn(context)
if is_context_provided(get_function_params(resource_def.resource_fn))
else resource_def.resource_fn()
)
# Flag for whether resource is generator. This is used to ensure that teardown
# occurs when resources are initialized out of execution.
is_gen = inspect.isgenerator(resource_or_gen)
gen = ensure_gen(resource_or_gen)
resource = next(gen)
resource = InitializedResource(
resource, format_duration(timer_result.millis), is_gen
)
except StopIteration:
check.failed(
"Resource generator {name} must yield one item.".format(name=resource_name)
)
yield resource
except DagsterUserCodeExecutionError as dagster_user_error:
raise dagster_user_error
with user_code_error_boundary(DagsterResourceFunctionError, msg_fn, log_manager=context.log):
try:
next(gen)
except StopIteration:
pass
else:
check.failed(
"Resource generator {name} yielded more than one item.".format(name=resource_name)
)
def get_required_resource_keys_to_init(
execution_plan, pipeline_def, resolved_run_config, intermediate_storage_def
):
resource_keys = set()
if intermediate_storage_def is not None:
resource_keys = resource_keys.union(intermediate_storage_def.required_resource_keys)
for step_handle, step in execution_plan.step_dict.items():
if step_handle not in execution_plan.step_handles_to_execute:
continue
hook_defs = pipeline_def.get_all_hooks_for_handle(step.solid_handle)
for hook_def in hook_defs:
resource_keys = resource_keys.union(hook_def.required_resource_keys)
resource_keys = resource_keys.union(
get_required_resource_keys_for_step(
pipeline_def, step, execution_plan, resolved_run_config, intermediate_storage_def
)
)
return frozenset(resource_keys)
def get_required_resource_keys_for_step(
pipeline_def, execution_step, execution_plan, resolved_run_config, intermediate_storage_def
):
resource_keys = set()
mode_definition = pipeline_def.get_mode_definition(resolved_run_config.mode)
resource_dependencies = resolve_resource_dependencies(mode_definition.resource_defs)
# add all the intermediate storage resource keys
if intermediate_storage_def is not None:
resource_keys = resource_keys.union(intermediate_storage_def.required_resource_keys)
# add all the solid compute resource keys
solid_def = pipeline_def.get_solid(execution_step.solid_handle).definition
resource_keys = resource_keys.union(solid_def.required_resource_keys)
# add input type, input loader, and input io manager resource keys
for step_input in execution_step.step_inputs:
input_def = step_input.source.get_input_def(pipeline_def)
resource_keys = resource_keys.union(input_def.dagster_type.required_resource_keys)
resource_keys = resource_keys.union(step_input.source.required_resource_keys(pipeline_def))
if isinstance(step_input, StepInput):
source_handles = step_input.get_step_output_handle_dependencies()
elif isinstance(step_input, (UnresolvedMappedStepInput, UnresolvedCollectStepInput)):
# Placeholder handles will allow lookup of the unresolved execution steps
# for what resources will be needed once the steps resolve
source_handles = step_input.get_step_output_handle_deps_with_placeholders()
else:
check.failed(f"Unexpected step input type {step_input}")
for source_handle in source_handles:
source_manager_key = execution_plan.get_manager_key(source_handle, pipeline_def)
if source_manager_key:
resource_keys = resource_keys.union([source_manager_key])
# add output type, output materializer, and output io manager resource keys
for step_output in execution_step.step_outputs:
# Load the output type
output_def = solid_def.output_def_named(step_output.name)
resource_keys = resource_keys.union(output_def.dagster_type.required_resource_keys)
if step_output.should_materialize and output_def.dagster_type.materializer:
resource_keys = resource_keys.union(
output_def.dagster_type.materializer.required_resource_keys()
)
if output_def.io_manager_key:
resource_keys = resource_keys.union([output_def.io_manager_key])
# add all the storage-compatible plugin resource keys
for dagster_type in solid_def.all_dagster_types():
for auto_plugin in dagster_type.auto_plugins:
if intermediate_storage_def is not None:
if auto_plugin.compatible_with_storage_def(intermediate_storage_def):
resource_keys = resource_keys.union(auto_plugin.required_resource_keys())
for resource_name in resource_keys:
resource_keys = resource_keys.union(
set(get_dependencies(resource_name, resource_dependencies))
)
return frozenset(resource_keys)
| 42.688406 | 106 | 0.700843 |
4a233b686e523080df20fc709aa1fa7920d176a6 | 17,644 | py | Python | databricks/koalas/tests/test_indexing.py | abishekganesh72/koalas | 40c2e209384d078ee75d08c7681d2e6a276ab834 | ["Apache-2.0"] | null | null | null | databricks/koalas/tests/test_indexing.py | abishekganesh72/koalas | 40c2e209384d078ee75d08c7681d2e6a276ab834 | ["Apache-2.0"] | null | null | null | databricks/koalas/tests/test_indexing.py | abishekganesh72/koalas | 40c2e209384d078ee75d08c7681d2e6a276ab834 | ["Apache-2.0"] | null | null | null |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
@property
def pdf(self):
return pd.DataFrame({'month': [1, 4, 7, 10],
'year': [2012, 2014, 2013, 2014],
'sale': [55, 40, 84, 31]})
@compare_both(almost=False)
def test_indexing(self, df):
df1 = df.set_index('month')
yield df1
yield df.set_index('month', drop=False)
yield df.set_index('month', append=True)
yield df.set_index(['year', 'month'])
yield df.set_index(['year', 'month'], drop=False)
yield df.set_index(['year', 'month'], append=True)
yield df1.set_index('year', drop=False, append=True)
df2 = df1.copy()
df2.set_index('year', append=True, inplace=True)
yield df2
self.assertRaisesRegex(KeyError, 'unknown', lambda: df.set_index('unknown'))
self.assertRaisesRegex(KeyError, 'unknown', lambda: df.set_index(['month', 'unknown']))
for d in [df, df1, df2]:
yield d.reset_index()
yield d.reset_index(drop=True)
yield df1.reset_index(level=0)
yield df2.reset_index(level=1)
yield df2.reset_index(level=[1, 0])
yield df1.reset_index(level='month')
yield df2.reset_index(level='year')
yield df2.reset_index(level=['month', 'year'])
yield df2.reset_index(level='month', drop=True)
yield df2.reset_index(level=['month', 'year'], drop=True)
if LooseVersion("0.20.0") <= LooseVersion(pd.__version__):
self.assertRaisesRegex(IndexError, 'Too many levels: Index has only 1 level, not 3',
lambda: df1.reset_index(level=2))
self.assertRaisesRegex(IndexError, 'Too many levels: Index has only 1 level, not 4',
lambda: df1.reset_index(level=[3, 2]))
self.assertRaisesRegex(KeyError, 'Level unknown must be same as name \\(month\\)',
lambda: df1.reset_index(level='unknown'))
self.assertRaisesRegex(KeyError, 'Level unknown not found',
lambda: df2.reset_index(level='unknown'))
df3 = df2.copy()
df3.reset_index(inplace=True)
yield df3
yield df1.sale.reset_index()
yield df1.sale.reset_index(level=0)
yield df2.sale.reset_index(level=[1, 0])
yield df1.sale.reset_index(drop=True)
yield df1.sale.reset_index(name='s')
yield df1.sale.reset_index(name='s', drop=True)
s = df1.sale
self.assertRaisesRegex(TypeError,
'Cannot reset_index inplace on a Series to create a DataFrame',
lambda: s.reset_index(inplace=True))
s.reset_index(drop=True, inplace=True)
yield s
yield df1
def test_from_pandas_with_explicit_index(self):
pdf = self.pdf
df1 = koalas.from_pandas(pdf.set_index('month'))
self.assertPandasEqual(df1.toPandas(), pdf.set_index('month'))
df2 = koalas.from_pandas(pdf.set_index(['year', 'month']))
self.assertPandasEqual(df2.toPandas(), pdf.set_index(['year', 'month']))
def test_limitations(self):
df = self.kdf.set_index('month')
self.assertRaisesRegex(ValueError, 'Level should be all int or all string.',
lambda: df.reset_index([1, 'month']))
self.assertRaisesRegex(NotImplementedError, 'Can\'t reset index because there is no index.',
lambda: df.reset_index().reset_index())
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0]
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf(self):
return koalas.from_pandas(self.pdf)
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
def test_loc_non_informative_index(self):
pdf = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 30, 40])
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf.loc[20:30], pdf.loc[20:30])
pdf = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 20, 40])
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf.loc[20:20], pdf.loc[20:20])
def test_loc_with_series(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[kdf.a % 2 == 0], pdf.loc[pdf.a % 2 == 0])
def test_loc_noindex(self):
kdf = self.kdf
kdf = kdf.reset_index()
pdf = self.pdf
pdf = pdf.reset_index()
self.assert_eq(kdf[['a']], pdf[['a']])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assertRaises(NotImplementedError, lambda: kdf.loc[5:5])
def test_loc_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index('b', append=True)
pdf = self.pdf
pdf = pdf.set_index('b', append=True)
self.assert_eq(kdf[['a']], pdf[['a']])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assertRaises(NotImplementedError, lambda: kdf.loc[5:5])
def test_loc2d_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index('b', append=True)
pdf = self.pdf
pdf = pdf.set_index('b', append=True)
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[:, 'a'], pdf.loc[:, 'a'])
self.assertRaises(NotImplementedError, lambda: kdf.loc[5:5, 'a'])
def test_loc2d(self):
kdf = self.kdf
pdf = self.pdf
# index indexer is always regarded as slice for duplicated values
self.assert_eq(kdf.loc[5:5, 'a'], pdf.loc[5:5, 'a'])
self.assert_eq(kdf.loc[[5], 'a'], pdf.loc[[5], 'a'])
self.assert_eq(kdf.loc[5:5, ['a']], pdf.loc[5:5, ['a']])
self.assert_eq(kdf.loc[[5], ['a']], pdf.loc[[5], ['a']])
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[3:8, 'a'], pdf.loc[3:8, 'a'])
self.assert_eq(kdf.loc[:8, 'a'], pdf.loc[:8, 'a'])
self.assert_eq(kdf.loc[3:, 'a'], pdf.loc[3:, 'a'])
self.assert_eq(kdf.loc[[8], 'a'], pdf.loc[[8], 'a'])
self.assert_eq(kdf.loc[3:8, ['a']], pdf.loc[3:8, ['a']])
self.assert_eq(kdf.loc[:8, ['a']], pdf.loc[:8, ['a']])
self.assert_eq(kdf.loc[3:, ['a']], pdf.loc[3:, ['a']])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 3], ['a']], pdf.loc[[3, 4, 3], ['a']])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[3, 3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3:, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[kdf.a % 2 == 0, 3])
def test_loc2d_with_known_divisions(self):
pdf = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('ABCDE'))
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf.loc[['a'], 'A'], pdf.loc[['a'], 'A'])
self.assert_eq(kdf.loc[['a'], ['A']], pdf.loc[['a'], ['A']])
self.assert_eq(kdf.loc['a':'o', 'A'], pdf.loc['a':'o', 'A'])
self.assert_eq(kdf.loc['a':'o', ['A']], pdf.loc['a':'o', ['A']])
self.assert_eq(kdf.loc[['n'], ['A']], pdf.loc[['n'], ['A']])
self.assert_eq(kdf.loc[['a', 'c', 'n'], ['A']], pdf.loc[['a', 'c', 'n'], ['A']])
# TODO?: self.assert_eq(kdf.loc[['t', 'b'], ['A']], pdf.loc[['t', 'b'], ['A']])
# TODO?: self.assert_eq(kdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
# TODO?: pdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
def test_loc2d_duplicated_columns(self):
pdf = pd.DataFrame(np.random.randn(20, 5),
index=list('abcdefghijklmnopqrst'),
columns=list('AABCD'))
        kdf = koalas.from_pandas(pdf)
        # Compare the Koalas frame against the original pandas frame rather than
        # against itself, otherwise the assertions are vacuous.
        # TODO?: self.assert_eq(kdf.loc[['a'], 'A'], pdf.loc[['a'], 'A'])
        # TODO?: self.assert_eq(kdf.loc[['a'], ['A']], pdf.loc[['a'], ['A']])
        self.assert_eq(kdf.loc[['j'], 'B'], pdf.loc[['j'], 'B'])
        self.assert_eq(kdf.loc[['j'], ['B']], pdf.loc[['j'], ['B']])
        # TODO?: self.assert_eq(kdf.loc['a':'o', 'A'], pdf.loc['a':'o', 'A'])
        # TODO?: self.assert_eq(kdf.loc['a':'o', ['A']], pdf.loc['a':'o', ['A']])
        self.assert_eq(kdf.loc['j':'q', 'B'], pdf.loc['j':'q', 'B'])
        self.assert_eq(kdf.loc['j':'q', ['B']], pdf.loc['j':'q', ['B']])
        # TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
        # TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
        # TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
        # TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
        self.assert_eq(kdf.loc[kdf.B > 0, 'B'], pdf.loc[pdf.B > 0, 'B'])
        # TODO?: self.assert_eq(kdf.loc[kdf.B > 0, ['A', 'C']], pdf.loc[pdf.B > 0, ['A', 'C']])
def test_getitem(self):
pdf = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
columns=list('ABC'))
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf['A'], pdf['A'])
self.assert_eq(kdf[['A', 'B']], pdf[['A', 'B']])
self.assert_eq(kdf[kdf.C], pdf[pdf.C])
self.assertRaises(KeyError, lambda: kdf['X'])
self.assertRaises(KeyError, lambda: kdf[['A', 'X']])
self.assertRaises(AttributeError, lambda: kdf.X)
# not str/unicode
# TODO?: pdf = pd.DataFrame(np.random.randn(10, 5))
# TODO?: kdf = koalas.from_pandas(pdf)
# TODO?: self.assert_eq(kdf[0], pdf[0])
# TODO?: self.assert_eq(kdf[[1, 2]], pdf[[1, 2]])
# TODO?: self.assertRaises(KeyError, lambda: pdf[8])
# TODO?: self.assertRaises(KeyError, lambda: pdf[[1, 8]])
def test_getitem_slice(self):
pdf = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
'C': [True, False, True] * 3},
index=list('abcdefghi'))
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf['a':'e'], pdf['a':'e'])
self.assert_eq(kdf['a':'b'], pdf['a':'b'])
self.assert_eq(kdf['f':], pdf['f':])
def test_loc_on_numpy_datetimes(self):
pdf = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(np.datetime64, ['2014', '2015', '2016'])))
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf.loc['2014':'2015'], pdf.loc['2014':'2015'])
def test_loc_on_pandas_datetimes(self):
pdf = pd.DataFrame({'x': [1, 2, 3]},
index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
kdf = koalas.from_pandas(pdf)
self.assert_eq(kdf.loc['2014':'2015'], pdf.loc['2014':'2015'])
@unittest.skip('TODO?: the behavior of slice for datetime')
def test_loc_datetime_no_freq(self):
datetime_index = pd.date_range('2016-01-01', '2016-01-31', freq='12h')
datetime_index.freq = None # FORGET FREQUENCY
pdf = pd.DataFrame({'num': range(len(datetime_index))}, index=datetime_index)
kdf = koalas.from_pandas(pdf)
slice_ = slice('2016-01-03', '2016-01-05')
result = kdf.loc[slice_, :]
expected = pdf.loc[slice_, :]
self.assert_eq(result, expected)
@unittest.skip('TODO?: the behavior of slice for datetime')
def test_loc_timestamp_str(self):
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
kdf = koalas.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf.loc['2011-01-02'],
# TODO?: kdf.loc['2011-01-02'])
self.assert_eq(pdf.loc['2011-01-02':'2011-01-05'],
kdf.loc['2011-01-02':'2011-01-05'])
# series
# TODO?: self.assert_eq(pdf.A.loc['2011-01-02'],
# TODO?: kdf.A.loc['2011-01-02'])
self.assert_eq(pdf.A.loc['2011-01-02':'2011-01-05'],
kdf.A.loc['2011-01-02':'2011-01-05'])
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='M', periods=100))
kdf = koalas.from_pandas(pdf)
# TODO?: self.assert_eq(pdf.loc['2011-01'], kdf.loc['2011-01'])
# TODO?: self.assert_eq(pdf.loc['2011'], kdf.loc['2011'])
self.assert_eq(pdf.loc['2011-01':'2012-05'], kdf.loc['2011-01':'2012-05'])
self.assert_eq(pdf.loc['2011':'2015'], kdf.loc['2011':'2015'])
# series
# TODO?: self.assert_eq(pdf.B.loc['2011-01'], kdf.B.loc['2011-01'])
# TODO?: self.assert_eq(pdf.B.loc['2011'], kdf.B.loc['2011'])
self.assert_eq(pdf.B.loc['2011-01':'2012-05'], kdf.B.loc['2011-01':'2012-05'])
self.assert_eq(pdf.B.loc['2011':'2015'], kdf.B.loc['2011':'2015'])
@unittest.skip('TODO?: the behavior of slice for datetime')
def test_getitem_timestamp_str(self):
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='H', periods=100))
kdf = koalas.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf['2011-01-02':'2011-01-05'],
kdf['2011-01-02':'2011-01-05'])
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.date_range('2011-01-01', freq='M', periods=100))
kdf = koalas.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf['2011-01':'2012-05'], kdf['2011-01':'2012-05'])
self.assert_eq(pdf['2011':'2015'], kdf['2011':'2015'])
@unittest.skip('TODO?: period index can\'t convert to DataFrame correctly')
def test_getitem_period_str(self):
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='H', periods=100))
kdf = koalas.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf['2011-01-02':'2011-01-05'],
kdf['2011-01-02':'2011-01-05'])
pdf = pd.DataFrame({'A': np.random.randn(100), 'B': np.random.randn(100)},
index=pd.period_range('2011-01-01', freq='M', periods=100))
kdf = koalas.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf['2011-01':'2012-05'], kdf['2011-01':'2012-05'])
self.assert_eq(pdf['2011':'2015'], kdf['2011':'2015'])
| 42.72155 | 100 | 0.545285 |
4a233c3784e51473706f5deef0f4ed3f7530cc6b | 5,134 | py | Python | Lib/test/test_msilib.py | ShiboXing/cpython | ef19bad7d6da99575d66c1f5dc8fd6ac57e92f6e | ["CNRI-Python-GPL-Compatible"] | 1,318 | 2019-07-11T10:34:39.000Z | 2022-03-29T15:05:19.000Z | Lib/test/test_msilib.py | ShiboXing/cpython | ef19bad7d6da99575d66c1f5dc8fd6ac57e92f6e | ["CNRI-Python-GPL-Compatible"] | 387 | 2019-09-05T16:33:09.000Z | 2022-03-31T10:43:39.000Z | Lib/test/test_msilib.py | docmarionum1/pyvcs-python | f62cf1a76d51302f342319d446d6fbf3c60f29c3 | ["0BSD"] | 66 | 2019-11-11T15:33:12.000Z | 2022-03-01T07:55:55.000Z |
""" Test suite for the code in msilib """
import os
import unittest
from test.support import TESTFN, import_module, unlink
msilib = import_module('msilib')
import msilib.schema
def init_database():
path = TESTFN + '.msi'
db = msilib.init_database(
path,
msilib.schema,
'Python Tests',
'product_code',
'1.0',
'PSF',
)
return db, path
class MsiDatabaseTestCase(unittest.TestCase):
def test_view_fetch_returns_none(self):
db, db_path = init_database()
properties = []
view = db.OpenView('SELECT Property, Value FROM Property')
view.Execute(None)
while True:
record = view.Fetch()
if record is None:
break
properties.append(record.GetString(1))
view.Close()
db.Close()
self.assertEqual(
properties,
[
'ProductName', 'ProductCode', 'ProductVersion',
'Manufacturer', 'ProductLanguage',
]
)
self.addCleanup(unlink, db_path)
def test_view_non_ascii(self):
db, db_path = init_database()
view = db.OpenView("SELECT 'ß-розпад' FROM Property")
view.Execute(None)
record = view.Fetch()
self.assertEqual(record.GetString(1), 'ß-розпад')
view.Close()
db.Close()
self.addCleanup(unlink, db_path)
def test_summaryinfo_getproperty_issue1104(self):
db, db_path = init_database()
try:
sum_info = db.GetSummaryInformation(99)
title = sum_info.GetProperty(msilib.PID_TITLE)
self.assertEqual(title, b"Installation Database")
sum_info.SetProperty(msilib.PID_TITLE, "a" * 999)
title = sum_info.GetProperty(msilib.PID_TITLE)
self.assertEqual(title, b"a" * 999)
sum_info.SetProperty(msilib.PID_TITLE, "a" * 1000)
title = sum_info.GetProperty(msilib.PID_TITLE)
self.assertEqual(title, b"a" * 1000)
sum_info.SetProperty(msilib.PID_TITLE, "a" * 1001)
title = sum_info.GetProperty(msilib.PID_TITLE)
self.assertEqual(title, b"a" * 1001)
finally:
db = None
sum_info = None
os.unlink(db_path)
def test_database_open_failed(self):
with self.assertRaises(msilib.MSIError) as cm:
msilib.OpenDatabase('non-existent.msi', msilib.MSIDBOPEN_READONLY)
self.assertEqual(str(cm.exception), 'open failed')
def test_database_create_failed(self):
db_path = os.path.join(TESTFN, 'test.msi')
with self.assertRaises(msilib.MSIError) as cm:
msilib.OpenDatabase(db_path, msilib.MSIDBOPEN_CREATE)
self.assertEqual(str(cm.exception), 'create failed')
def test_get_property_vt_empty(self):
db, db_path = init_database()
summary = db.GetSummaryInformation(0)
self.assertIsNone(summary.GetProperty(msilib.PID_SECURITY))
db.Close()
self.addCleanup(unlink, db_path)
def test_directory_start_component_keyfile(self):
db, db_path = init_database()
self.addCleanup(unlink, db_path)
self.addCleanup(db.Close)
self.addCleanup(msilib._directories.clear)
feature = msilib.Feature(db, 0, 'Feature', 'A feature', 'Python')
cab = msilib.CAB('CAB')
dir = msilib.Directory(db, cab, None, TESTFN, 'TARGETDIR',
'SourceDir', 0)
dir.start_component(None, feature, None, 'keyfile')
def test_getproperty_uninitialized_var(self):
db, db_path = init_database()
self.addCleanup(unlink, db_path)
self.addCleanup(db.Close)
si = db.GetSummaryInformation(0)
with self.assertRaises(msilib.MSIError):
si.GetProperty(-1)
class Test_make_id(unittest.TestCase):
#http://msdn.microsoft.com/en-us/library/aa369212(v=vs.85).aspx
"""The Identifier data type is a text string. Identifiers may contain the
ASCII characters A-Z (a-z), digits, underscores (_), or periods (.).
However, every identifier must begin with either a letter or an
underscore.
"""
def test_is_no_change_required(self):
self.assertEqual(
msilib.make_id("short"), "short")
self.assertEqual(
msilib.make_id("nochangerequired"), "nochangerequired")
self.assertEqual(
msilib.make_id("one.dot"), "one.dot")
self.assertEqual(
msilib.make_id("_"), "_")
self.assertEqual(
msilib.make_id("a"), "a")
#self.assertEqual(
# msilib.make_id(""), "")
def test_invalid_first_char(self):
self.assertEqual(
msilib.make_id("9.short"), "_9.short")
self.assertEqual(
msilib.make_id(".short"), "_.short")
def test_invalid_any_char(self):
self.assertEqual(
msilib.make_id(".s\x82ort"), "_.s_ort")
self.assertEqual(
msilib.make_id(".s\x82o?*+rt"), "_.s_o___rt")
if __name__ == '__main__':
unittest.main()
| 33.555556 | 78 | 0.608103 |
4a233c46cfa2ec61b5783332591e9f3903735dc7 | 1,312 | py | Python | src/dataset.py | jkulhanek/faster-rcnn-pytorch | a8db36f1ff3497b24e5d87f2b6e327534cceb688 | ["MIT"] | 2 | 2018-07-02T11:49:28.000Z | 2020-06-21T21:40:43.000Z | src/dataset.py | jkulhanek/faster-rcnn-pytorch | a8db36f1ff3497b24e5d87f2b6e327534cceb688 | ["MIT"] | null | null | null | src/dataset.py | jkulhanek/faster-rcnn-pytorch | a8db36f1ff3497b24e5d87f2b6e327534cceb688 | ["MIT"] | null | null | null |
import torch
import torchvision
import model
import model_utils as utils
import torchvision.transforms as transforms
DATASET_PATH = "/datasets/pascalvoc/VOC2007/JPEGImages"
DATASET_TRAIN_TARGET = "/datasets/pascalvoc/annotations/pascal_train2007.json"
DATASET_PATH = "D:\\datasets\\pascalvoc\\VOC2007\\JPEGImages"
DATASET_TRAIN_TARGET = "D:\\datasets\\pascalvoc\\annotations\\pascal_train2007.json"
def target_transform(target):
# Ignore ignored
target = [x for x in target if x['ignore'] != 1]
target_count = len(target)
boxes = torch.empty((target_count, 4,), requires_grad = False)
labels =torch.empty((target_count,), requires_grad = False)
for i,elem in enumerate(target):
boxes[i] = torch.tensor(elem['bbox'])
labels[i] = elem['category_id']
return {
"boxes": boxes,
"labels": labels
}
def make_dataset(dataset_root_path=DATASET_PATH, dataset_train_target=DATASET_TRAIN_TARGET):
    # Use the arguments rather than the module-level constants so callers can override the paths.
    return torchvision.datasets.CocoDetection(dataset_root_path, dataset_train_target,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        target_transform=transforms.Lambda(target_transform))
| 37.485714 | 97 | 0.692073 |
4a233dd1a2f7b56b37a0686592ac356a1007f48a | 1,204 | py | Python | xmltranslate/xml_ops.py | ramansah/xmllib | 71d375c7735636d28a5915afb04f19325f8a2bf7 | ["MIT"] | null | null | null | xmltranslate/xml_ops.py | ramansah/xmllib | 71d375c7735636d28a5915afb04f19325f8a2bf7 | ["MIT"] | null | null | null | xmltranslate/xml_ops.py | ramansah/xmllib | 71d375c7735636d28a5915afb04f19325f8a2bf7 | ["MIT"] | null | null | null |
from xml.etree.ElementTree import fromstring, Element, SubElement, dump
def xml2dict(xml_obj: str):
    tree = fromstring(xml_obj)
    # Wrap the root tag here; _xml2dict returns only the nested children mapping.
    return {tree.tag: _xml2dict(tree)}
def dict2xml(dict_obj: dict):
    # Expects a dict with a single root key; dump() writes the XML to stdout and returns None.
    for key, value in dict_obj.items():
        xml_ele = Element(key)
    return dump(_dict2xml(value, xml_ele))
def _dict2xml(dict_obj: dict, root):
for key, value in dict_obj.items():
if type(value) == dict:
t = SubElement(root, key)
_dict2xml(value, t)
else:
t = SubElement(root, key)
t.text = value
return root
def _xml2dict(xml_ele):
    children = list(xml_ele)  # getchildren() is deprecated and removed in Python 3.9
    if children:
        obj = dict()
        for child in children:
            obj[child.tag] = _xml2dict(child)
        return obj
    else:
        return xml_ele.text
if __name__ == '__main__':
test_string = '<parent><child2>CHILD2</child2><child1><child11>CHILD11</child11></child1></parent>'
test_obj = {
'parent': {
'child1': {
'child11': 'CHILD11'
},
'child2': 'CHILD2'
}
}
#print(xml2dict(test_string))
#print(dict2xml(test_obj))
| 24.571429 | 103 | 0.568106 |
4a233dd4541846e38dfd8d77cd00be2b898331bf | 153 | py | Python | pyrep/robots/arms/locobot_arm.py | WeiWeic6222848/PyRep | 231a1ac6b0a179cff53c1d403d379260b9f05f2f | ["MIT"] | 505 | 2019-06-26T17:02:44.000Z | 2022-03-31T04:03:23.000Z | pyrep/robots/arms/locobot_arm.py | WeiWeic6222848/PyRep | 231a1ac6b0a179cff53c1d403d379260b9f05f2f | ["MIT"] | 255 | 2019-06-27T07:04:17.000Z | 2022-03-29T18:25:48.000Z | pyrep/robots/arms/locobot_arm.py | WeiWeic6222848/PyRep | 231a1ac6b0a179cff53c1d403d379260b9f05f2f | ["MIT"] | 171 | 2019-06-27T05:33:50.000Z | 2022-03-30T03:34:24.000Z |
from pyrep.robots.arms.arm import Arm
class LoCoBotArm(Arm):
def __init__(self, count: int = 0):
super().__init__(count, 'LoCoBotArm', 5)
| 19.125 | 48 | 0.666667 |
4a233df672395721ff4647f1f67ac71391518ed7 | 48 | wsgi | Python | tmp/www/wsgi-scripts/graphite-api.wsgi | anvart/stormwatch | 7a5f277d4021b3429871ea0c045567c365aec7f4 | ["MIT"] | 6 | 2017-07-05T16:59:16.000Z | 2020-07-01T10:17:09.000Z | tmp/www/wsgi-scripts/graphite-api.wsgi | anvart/stormwatch | 7a5f277d4021b3429871ea0c045567c365aec7f4 | ["MIT"] | 8 | 2017-11-03T13:36:53.000Z | 2021-09-05T11:05:17.000Z | tmp/www/wsgi-scripts/graphite-api.wsgi | anvart/stormwatch | 7a5f277d4021b3429871ea0c045567c365aec7f4 | ["MIT"] | 6 | 2016-11-10T12:56:41.000Z | 2018-06-19T21:53:58.000Z |
from graphite_api.app import app as application
4a233e9f10d49d2881df2b11546b1e33d6a2fbfe | 13,252 | py | Python | src/ExportCsvToInflux/csv_object.py | 7yl4r/export-csv-to-influx | 9bb5a192983363d15e2f4210ef0f8b8e3cd185d3 | [
"BSD-3-Clause"
] | null | null | null | src/ExportCsvToInflux/csv_object.py | 7yl4r/export-csv-to-influx | 9bb5a192983363d15e2f4210ef0f8b8e3cd185d3 | [
"BSD-3-Clause"
] | null | null | null | src/ExportCsvToInflux/csv_object.py | 7yl4r/export-csv-to-influx | 9bb5a192983363d15e2f4210ef0f8b8e3cd185d3 | [
"BSD-3-Clause"
] | null | null | null | from collections import defaultdict
from .base_object import BaseObject
from itertools import tee
from glob import glob
import hashlib
import types
import time
import json
import csv
import sys
import os
class CSVObject(object):
"""CSV Object"""
def __init__(self, delimiter=',', lineterminator='\n'):
self.delimiter = delimiter
self.lineterminator = lineterminator
def get_csv_header(self, file_name):
"""Function: get_csv_header.
:param file_name: the file name
        :return: the csv header as a list
"""
self.valid_file_exist(file_name)
with open(file_name) as f:
sniffer = csv.Sniffer()
try:
has_header = sniffer.has_header(f.read(40960))
except csv.Error:
has_header = False
f.seek(0)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
headers = csv_reader.fieldnames
is_header = not any(field.isdigit() for field in headers)
headers = headers if has_header or is_header else []
return headers
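    # A minimal usage sketch (hypothetical file name):
    #
    #     headers = CSVObject().get_csv_header('/tmp/demo.csv')
    #     # -> e.g. ['time', 'host', 'value'] when a header row is detected, [] otherwise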
@staticmethod
def search_files_in_dir(directory, match_suffix='.csv', filter_pattern='_influx.csv'):
"""Function: search_files_in_dir
:param directory: the directory
        :param match_suffix: file suffixes to match; separate multiple suffixes with commas (plain strings only, regex not supported)
        :param filter_pattern: patterns used to filter out files; separate multiple patterns with commas (plain strings only, regex not supported)
"""
base_object = BaseObject()
match_suffix = base_object.str_to_list(match_suffix, lower=True)
filter_pattern = base_object.str_to_list(filter_pattern, lower=True)
# Is file
is_file = os.path.isfile(directory)
if is_file:
yield directory
# Search directory
for x in os.walk(directory):
for y in glob(os.path.join(x[0], '*.*')):
# Continue if directory
try:
check_directory = os.path.isdir(y)
except UnicodeEncodeError as e:
y = y.encode('utf-8', 'ignore')
print('Warning: Unicode Encode Error found when checking isdir {0}: {1}'.format(y, e))
check_directory = os.path.isdir(y)
if check_directory is True:
continue
# Filter Out
match_suffix_status = any(the_filter in y.lower() for the_filter in match_suffix)
filter_pattern_status = any(the_filter in y.lower() for the_filter in filter_pattern)
if match_suffix_status is True and filter_pattern_status is False:
yield y
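    # A minimal usage sketch (hypothetical directory name):
    #
    #     for path in CSVObject().search_files_in_dir('/tmp/exports',
    #                                                 match_suffix='.csv,.txt',
    #                                                 filter_pattern='_influx.csv,_bak'):
    #         print(path)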
@staticmethod
def valid_file_exist(file_name):
"""Function: valid_file_exist
:param file_name: the file name
"""
file_exists = os.path.exists(file_name)
if file_exists is False:
error_message = 'Error: The file does not exist: {0}'.format(file_name)
sys.exit(error_message)
def get_file_md5(self, file_name):
"""Function: get_file_md5
:param file_name: the file name
        :return: the md5 hash of the file
"""
self.valid_file_exist(file_name)
hash_md5 = hashlib.md5()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(40960), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_file_modify_time(self, file_name, enable_ms=False):
"""Function: get_file_modify_time
:param file_name: the file name
:param enable_ms: enable milliseconds (default False)
        :return: the human-readable modification time
"""
self.valid_file_exist(file_name)
modified = os.path.getmtime(file_name)
modified_s, modified_ms = divmod(modified * 1000, 1000)
if enable_ms is False:
modified_pretty = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s))
else:
modified_pretty = '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s)), modified_ms)
return modified_pretty
def get_csv_lines_count(self, file_name):
"""Function: get_csv_lines_count.
:param file_name: the file name
        :return: the number of csv data rows; the header row is not counted
"""
has_header = self.get_csv_header(file_name)
with open(file_name) as f:
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
count = 0 if has_header else 1
for row in csv_reader:
count += 1
return count
def convert_csv_data_to_int_float(self, file_name=None, csv_reader=None):
"""Function: convert_csv_data_to_int_float
:param file_name: the file name (default None)
:param csv_reader: the csv dict reader (default None)
The csv_reader could come from 2 ways:
1. use csv.DictReader to get the csv_reader object
2. use dict to make up the csv_reader, the dict format is as following
[
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
...
]
"""
# init
int_type = defaultdict(list)
float_type = defaultdict(list)
keys = list()
csv_reader = list() if csv_reader is None else csv_reader
csv_reader_bk = csv_reader
has_header = True
# Verify the csv_reader
csv_reader_type = type(csv_reader)
is_generator_type = isinstance(csv_reader, types.GeneratorType)
if csv_reader_type != list and csv_reader_type != csv.DictReader and not is_generator_type:
error_message = 'Error: The csv_reader type is not expected: {0}, ' \
'should list type or csv.DictReader'.format(csv_reader_type)
sys.exit(error_message)
if is_generator_type:
csv_reader, csv_reader_bk = tee(csv_reader)
# Get csv_reader from csv file
f = None
if file_name:
has_header = self.get_csv_header(file_name)
f = open(file_name)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
csv_reader, csv_reader_bk = tee(csv_reader)
# Process
for row in csv_reader:
keys = row.keys()
for key in keys:
value = row[key]
len_value = len(value)
# Continue If Value Empty
if len_value == 0:
continue
# Valid Int Type
try:
if float(value).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(value)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# check for empty columns
for key in keys:
if len(int_type[key]) == 0 or len(float_type[key]) == 0:
raise ValueError(
f"Column '{key}' has no valid int or float rows; "
"check your csv file."
)
# Valid the key if no header
if keys and not has_header:
for key in keys:
len_key = len(key)
# Continue If Key Empty
if len_key == 0:
continue
# Valid Int Type
try:
if float(key).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(key)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# Finalize Type
int_type = {k: all(int_type[k]) for k in int_type}
float_type = {k: all(float_type[k]) for k in float_type}
# Yield Data
i = 1
for row in csv_reader_bk:
keys = row.keys()
for key in keys:
value = row[key]
int_status = int_type[key]
len_value = len(value)
if len_value == 0:
continue
if int_status is True:
row[key] = int(float(value)) if int_type[key] is True else value
else:
row[key] = float(value) if float_type[key] is True else value
yield row, int_type, float_type
if not has_header and i == 1:
for key in keys:
int_status = int_type[key]
len_key = len(key)
if len_key == 0:
continue
if int_status is True:
row[key] = int(float(key)) if int_type[key] is True else key
else:
row[key] = float(key) if float_type[key] is True else key
yield row, int_type, float_type
i += 1
# Close file
if file_name:
f.close()
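    # A minimal usage sketch of the generator above, using the documented dict form of
    # csv_reader instead of a file (hypothetical values):
    #
    #     rows = [{'h1': '1', 'h2': '1.5'}, {'h1': '2', 'h2': 'x'}]
    #     for row, int_type, float_type in CSVObject().convert_csv_data_to_int_float(csv_reader=rows):
    #         print(row)        # {'h1': 1, 'h2': '1.5'} then {'h1': 2, 'h2': 'x'}
    #         print(int_type)   # {'h1': True, 'h2': False}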
def add_columns_to_csv(self,
file_name,
target,
data,
save_csv_file=True):
"""Function: add_columns_to_csv
:param file_name: the file name
:param target: the target file to save result
:param data: the new columns data, list type, the item is dict.
for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]},
{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}
]
:param save_csv_file: save csv file to local (default True)
        :return: yields the new csv rows as dicts
"""
has_header = self.get_csv_header(file_name)
# Process data
data_type = type(data)
error_message = 'Error: The data should be list type, the item should be dict. Or the json type as following ' \
'for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]}, ' \
'{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}]'
try:
check_data_type = data_type is not list and data_type is not str and data_type is not unicode
except NameError:
check_data_type = data_type is not list and data_type is not str
if check_data_type:
sys.exit(error_message)
try:
check_data_type = data_type is str or data_type is unicode
except NameError:
check_data_type = data_type is str
if check_data_type:
try:
data = json.loads(data)
except ValueError:
sys.exit(error_message)
# Add columns
target_writer = None
target_file = None
if save_csv_file:
target_file = open(target, 'w+')
target_writer = csv.writer(target_file, delimiter=self.delimiter, lineterminator=self.lineterminator)
with open(file_name) as f:
source_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
new_headers = [list(x.keys())[0] for x in data]
row_id = 0
for row in source_reader:
values = list(row.values())
if row_id == 0:
headers = list(row.keys())
if not has_header:
continue
headers += new_headers
if save_csv_file:
target_writer.writerow(headers)
new_values = list()
for x in data:
try:
value = list(x.values())[0][row_id]
except IndexError:
                        print('Warning: The provided column has fewer values than the source csv '
                              'has rows. Using "null" to fill the missing data')
value = 'null'
new_values.append(value)
values += new_values
row_id += 1
if save_csv_file:
target_writer.writerow(values)
yield dict(zip(headers, values))
if save_csv_file:
target_file.close()
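    # A minimal usage sketch (hypothetical file names and columns):
    #
    #     new_columns = [{"region": ["us", "eu"]}, {"owner": ["ops", "ops"]}]
    #     for row in CSVObject().add_columns_to_csv('/tmp/demo.csv', '/tmp/demo_out.csv', new_columns):
    #         print(row)   # original columns plus "region" and "owner"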
| 37.120448 | 120 | 0.541956 |
4a233ea9eee4061fd7ff883078303aef2e90f084 | 483 | py | Python | tagulous/serializers/json.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | ["Apache-2.0"] | null | null | null | tagulous/serializers/json.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | ["Apache-2.0"] | null | null | null | tagulous/serializers/json.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | ["Apache-2.0"] | null | null | null |
"""
JSON serializer with Tagulous support
"""
from __future__ import unicode_literals
from django.core.serializers import json as json_serializer
from tagulous.serializers import base
class Serializer(base.SerializerMixin, json_serializer.Serializer):
"""
JSON serializer with tag field support
"""
pass
Deserializer = base.DeserializerWrapper(
json_serializer.Deserializer,
doc="Deserialize a stream or string of JSON data, with tag field support",
)
| 21 | 78 | 0.766046 |
4a233f05a5100d2f81b91a62e8a3d28d13bc7bd9 | 11,842 | py | Python | dj_hetmech_app/views.py | dhimmel/hetmech-backend | 28d9626d75c5fa78b03ae0cb18b32f90bcf1fe8f | [
"BSD-3-Clause"
] | 4 | 2020-01-24T16:11:10.000Z | 2020-11-19T20:40:53.000Z | dj_hetmech_app/views.py | dhimmel/hetmech-backend | 28d9626d75c5fa78b03ae0cb18b32f90bcf1fe8f | [
"BSD-3-Clause"
] | 57 | 2018-10-30T18:29:14.000Z | 2019-11-04T15:19:51.000Z | dj_hetmech_app/views.py | dhimmel/hetmech-backend | 28d9626d75c5fa78b03ae0cb18b32f90bcf1fe8f | [
"BSD-3-Clause"
] | 3 | 2018-10-30T18:07:19.000Z | 2019-06-04T14:50:44.000Z | import functools
from django.db.models import Q
from rest_framework import filters
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
from .models import Node, PathCount
from .serializers import NodeSerializer, MetapathSerializer, PathCountDgpSerializer
@api_view(['GET'])
def api_root(request):
"""
Hetionet connectivity search API. This API is used to power <https://search.het.io>.
The codebase for this API is available at <https://github.com/greenelab/connectivity-search-backend>.
Please use GitHub Issues for any questions or feedback.
"""
return Response([
reverse('node', request=request, kwargs={'pk': 2}),
reverse('nodes', request=request),
reverse('random-node-pair', request=request),
reverse('metapaths', request=request, kwargs={'source': 17054, 'target': 6602}),
reverse('metapaths-random-nodes', request=request),
reverse('paths', request=request, kwargs={'source': 17054, 'target': 6602, 'metapath': 'CbGeAlD'}),
])
class NodeViewSet(ReadOnlyModelViewSet):
"""
Return nodes, sorted by similarity to the search term.
Use `search=<str>` to search `identifier` for prefix match, and `name` for substring and trigram searches (similarity defaults to 0.3);
Use `search=<str>&similarity=<value>` to set your own `similarity` value in the range of (0, 1.0].
Set `similarity=1.0` to exclude trigram search.
Filter for select metanodes using `metanodes=<str>`, where `<str>` is a comma-separated list of metanode abbreviations.
For example, `metanodes=C,D` will restrict to Compound and Disease nodes.
Set `other-node=<node_id>` to return non-null values for `metapath_count`.
`metapath_counts` measures the number of metapaths stored in the database between the result node and other node.
    If `search` and `other-node` are both specified, results are sorted by search similarity and results with `metapath_count == 0` are returned.
If `other-node` is specified but not `search`, results are sorted by `metapath_count` (descending) and only results with `metapath_count > 0` are returned.
"""
http_method_names = ['get']
serializer_class = NodeSerializer
filter_backends = (filters.SearchFilter, )
@functools.lru_cache()
def get_serializer_context(self):
"""
Add metapath_counts to context if "other-node" was specified.
https://stackoverflow.com/a/52859696/4651668
"""
context = super().get_serializer_context()
search_against = context['request'].query_params.get('other-node')
if search_against is None:
return context
try:
search_against = int(search_against)
except ValueError:
return context
from dj_hetmech_app.utils.paths import get_metapath_counts_for_node
context['metapath_counts'] = get_metapath_counts_for_node(search_against)
return context
def get_queryset(self):
"""Optionally restricts the returned nodes based on `metanodes` and
`search` parameters in the URL.
"""
queryset = Node.objects.all()
# 'metanodes' parameter for exact match on metanode abbreviation
metanodes = get_metanodes(self.request)
if metanodes is not None:
queryset = queryset.filter(metanode__abbreviation__in=metanodes)
# 'search' parameter to search 'identifier' and 'name' fields
search_str = self.request.query_params.get('search', None)
if search_str is not None:
from django.contrib.postgres.search import TrigramSimilarity
from django.db.models import Case, When, Value, IntegerField
# 'similarity' defaults to 0.3
similarity = self.request.query_params.get('similarity', "0.3")
try:
similarity = float(similarity)
if similarity <= 0 or similarity > 1.0:
raise ValueError
except ValueError:
from rest_framework.exceptions import ParseError
raise ParseError(
{'error': 'Value of similarity must be in (0, 1.0]'}
)
queryset = queryset.annotate(
similarity=TrigramSimilarity('name', search_str)
).filter(
                Q(identifier__istartswith=search_str) | # prefix match of "identifier"
Q(name__icontains=search_str) | # substring match of "name"
Q(similarity__gt=similarity) # trigram search of "name"
).annotate(
identifier_prefix_match=Case(
When(identifier__istartswith=search_str, then=Value(1)),
default=Value(0),
output_field=IntegerField(),
),
name_substr_match=Case(
When(name__icontains=search_str, then=Value(1)),
default=Value(0),
output_field=IntegerField(),
)
).order_by(
'-identifier_prefix_match', '-name_substr_match', '-similarity', 'name'
)
elif 'other-node' in self.request.query_params:
metapath_counts = self.get_serializer_context()['metapath_counts']
queryset = queryset.filter(pk__in=set(metapath_counts))
queryset = sorted(queryset, key=lambda node: metapath_counts[node.pk], reverse=True)
return queryset
class RandomNodePairView(APIView):
"""
Return a random source and target node for which at least one metapath with path count information exists in the database.
The implementation chooses a random row from the PathCount table,
such that source-target pairs with many metapaths are more likely to be selected than source-target pairs with few metapaths.
"""
http_method_names = ['get']
def get(self, request):
import random
# More info on random row lookup at https://stackoverflow.com/a/56119397/4651668
max_id = PathCount.objects.last().id
random_id = random.randint(0, max_id)
pathcount_row = PathCount.objects.get(pk=random_id)
n_metapaths = PathCount.objects.filter(source=pathcount_row.source, target=pathcount_row.target).count()
data = {
'source_id': pathcount_row.source.id,
'target_id': pathcount_row.target.id,
'n_metapaths': n_metapaths,
'pathcount_table_random_id': random_id,
'pathcount_table_max_id': max_id,
}
return Response(data)
class QueryMetapathsView(APIView):
"""
Return metapaths between a given source and target node whose path count information is stored in the database.
Specify `complete` to also return metapaths of unknown significance whose path count information is not stored in the database.
If not specified, `limit` defaults to returning all metapaths (i.e. without limit).
The database only stores a single orientation of a metapath.
For example, if GpPpGaD is stored between the given source and target node, DaGpPpG would not also be stored.
Therefore, both orientations of a metapath are searched against the PathCount table.
"""
http_method_names = ['get']
def get(self, request, source, target):
source_node = get_object_or_404(Node, pk=source)
target_node = get_object_or_404(Node, pk=target)
limit = get_limit(request, default=None)
from .utils.paths import get_pathcount_queryset, get_metapath_queryset
pathcounts = get_pathcount_queryset(source, target)
pathcounts = PathCountDgpSerializer(pathcounts, many=True).data
pathcounts.sort(key=lambda x: (x['adjusted_p_value'], x['p_value'], x['metapath_abbreviation']))
if limit is not None:
pathcounts = pathcounts[:limit]
if 'complete' in request.query_params:
metapaths_present = {x['metapath_id'] for x in pathcounts}
metapath_qs = get_metapath_queryset(
source_node.metanode,
target_node.metanode,
extra_filters=~Q(abbreviation__in=metapaths_present),
)
if limit is not None:
metapath_qs = metapath_qs[:limit - len(pathcounts)]
pathcounts += MetapathSerializer(metapath_qs, many=True).data
            # `metapath_qs = metapath_qs[:0]` doesn't filter to an empty query set
if limit is not None:
pathcounts = pathcounts[:limit]
remove_keys = {'source', 'target', 'metapath_source', 'metapath_target'}
for dictionary in pathcounts:
for key in remove_keys & set(dictionary):
del dictionary[key]
data = {
'source': NodeSerializer(source_node).data,
'target': NodeSerializer(target_node).data,
'path_counts': pathcounts,
}
return Response(data)
class QueryMetapathsRandomNodesView(QueryMetapathsView):
"""
Return metapaths for a random source and target node for which at least one metapath with path count information exists in the database.
"""
def get(self, request):
info = RandomNodePairView().get(request=None).data
response = super().get(
request,
source=info.pop('source_id'),
target=info.pop('target_id'))
response.data.update(info)
return response
class QueryPathsView(APIView):
"""
For a given source node, target node, and metapath, return the actual paths comprising the path count / DWPC.
These paths have not been pre-computed and are extracted on-the-fly from the Hetionet Neo4j Browser.
Therefore, it is advisable to avoid querying a source-target-metapath pair with a path count exceeding 10,000.
Because results are ordered by PDP / percent_of_DWPC, reducing `limit` does not prevent neo4j from having to exhaustively traverse all paths.
"""
http_method_names = ['get']
def get(self, request, source, target, metapath):
source_node = get_object_or_404(Node, pk=source)
target_node = get_object_or_404(Node, pk=target)
# TODO: validate "metapath" is a valid abbreviation
limit = get_limit(request, default=100)
from .utils.paths import get_paths
output = get_paths(metapath, source_node.id, target_node.id, limit=limit)
return Response(output)
def get_object_or_404(klass, *args, **kwargs):
"""
Similar to `django.shortcuts.get_object_or_404` but raises NotFound and produces a more verbose error message.
"""
from django.shortcuts import _get_queryset
from rest_framework.exceptions import NotFound
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
error = e
except queryset.model.MultipleObjectsReturned as e:
error = e
message = f"{error} Lookup parameters: args={args} kwargs={kwargs}"
raise NotFound(message)
def get_metanodes(request):
metanodes = request.query_params.get('metanodes')
if metanodes is not None:
assert isinstance(metanodes, str)
metanodes = metanodes.split(',')
return metanodes
def get_limit(request, default: int = 100):
from rest_framework.exceptions import ParseError
limit = request.query_params.get('limit', default)
if limit is None:
return None
try:
limit = int(limit)
except Exception:
raise ParseError("limit is not a valid number")
if limit < 0:
limit = None
return limit
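# Illustrative behaviour of get_limit (not part of the original module):
#   ?limit=50   -> 50
#   ?limit=-1   -> None (treated as "no limit")
#   ?limit=abc  -> raises ParseError("limit is not a valid number")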
| 43.218978 | 159 | 0.665259 |
4a233f128bd890849ac2ae2cd5160f1edf45f7e4 | 5,564 | py | Python | model/topic.py | jaiminpan/F2E.pg | 3b5baabc6f5a250bc96ff283e0afb3b1c44318de | [
"BSD-3-Clause"
] | null | null | null | model/topic.py | jaiminpan/F2E.pg | 3b5baabc6f5a250bc96ff283e0afb3b1c44318de | [
"BSD-3-Clause"
] | null | null | null | model/topic.py | jaiminpan/F2E.pg | 3b5baabc6f5a250bc96ff283e0afb3b1c44318de | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2012 F2E.im
# Do have a faith in what you're doing.
# Make your life a story worth telling.
import time
from lib.query import Query
class TopicModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "topic"
super(TopicModel, self).__init__()
def get_all_topics(self, num = 36, current_page = 1):
join = "LEFT JOIN users AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN users AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "last_touched DESC, created DESC, last_replied_time DESC, id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_all_topics_by_node_slug(self, num = 36, current_page = 1, node_slug = None):
where = "node.slug = '%s'" % node_slug
join = "LEFT JOIN users AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN users AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "last_touched DESC, created DESC, last_replied_time DESC, id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_all_topics_count(self):
return self.count()
def get_user_all_topics(self, uid, num = 36, current_page = 1):
where = "topic.author_id = %s" % uid
join = "LEFT JOIN users AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN users AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_user_all_topics_count(self, uid):
where = "author_id = %s" % uid
return self.where(where).count()
def get_user_all_replied_topics(self, uid, num = 36, current_page = 1):
where = "reply.uid = %s" % uid
join = "LEFT JOIN reply ON topic.id = reply.tid LEFT JOIN users ON topic.uid = users.uid"
order = "topic.id DESC"
field = "*, topic.created as created"
group = "tid"
return self.where(where).order(order).join(join).field(field).group(group).pages(current_page = current_page, list_rows = num)
def get_topic_by_topic_id(self, topic_id):
where = "topic.id = %s" % topic_id
join = "LEFT JOIN users AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN users AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).join(join).field(field).find()
def add_new_topic(self, topic_info):
return self.data(topic_info).add()
def update_topic_by_topic_id(self, topic_id, topic_info):
where = "topic.id = %s" % topic_id
return self.where(where).data(topic_info).save()
def get_user_last_created_topic(self, uid):
where = "topic.author_id = %s" % uid
order = "topic.created DESC"
return self.where(where).order(order).find()
| 48.807018 | 134 | 0.63138 |
4a233f3271d12938ca9505b4e942bdcd0342f0e1 | 1,948 | py | Python | okta/models/ws_federation_application_settings.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 145 | 2017-06-13T21:54:04.000Z | 2022-02-25T05:44:34.000Z | okta/models/ws_federation_application_settings.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 146 | 2017-06-02T17:46:12.000Z | 2022-03-29T15:52:15.000Z | okta/models/ws_federation_application_settings.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 98 | 2017-06-27T03:44:51.000Z | 2022-03-23T04:58:18.000Z | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.models.application_settings\
import ApplicationSettings
from okta.models import ws_federation_application_settings_application\
as ws_federation_application_settings_application
class WsFederationApplicationSettings(
ApplicationSettings
):
"""
A class for WsFederationApplicationSettings objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
if "app" in config:
if isinstance(config["app"],
ws_federation_application_settings_application.WsFederationApplicationSettingsApplication):
self.app = config["app"]
elif config["app"] is not None:
self.app = ws_federation_application_settings_application.WsFederationApplicationSettingsApplication(
config["app"]
)
else:
self.app = None
else:
self.app = None
else:
self.app = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"app": self.app
}
parent_req_format.update(current_obj_format)
return parent_req_format
| 33.016949 | 121 | 0.666838 |
4a23413f89804f690215d9a822b011e84f43d865 | 184 | py | Python | poradnia/cases/apps.py | efefre/poradnia | 8bf9c88888d538cf4d1224431355c850d31ef252 | [
"MIT"
] | 23 | 2015-07-20T01:10:52.000Z | 2021-01-12T10:05:48.000Z | poradnia/cases/apps.py | efefre/poradnia | 8bf9c88888d538cf4d1224431355c850d31ef252 | [
"MIT"
] | 710 | 2015-07-12T13:19:14.000Z | 2022-03-29T12:38:18.000Z | poradnia/cases/apps.py | efefre/poradnia | 8bf9c88888d538cf4d1224431355c850d31ef252 | [
"MIT"
] | 20 | 2015-07-21T00:45:34.000Z | 2021-01-31T12:48:18.000Z | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CustomAppConfig(AppConfig):
name = "poradnia.cases"
verbose_name = _("Cases")
| 23 | 55 | 0.766304 |
4a234156724f02501103597e1db5b79f6463e576 | 6,553 | py | Python | medusa/config.py | chebelom/cassandra-medusa | 5afc3aaf9295466cd24b5d97f9f6e3455498be72 | [
"Apache-2.0"
] | null | null | null | medusa/config.py | chebelom/cassandra-medusa | 5afc3aaf9295466cd24b5d97f9f6e3455498be72 | [
"Apache-2.0"
] | null | null | null | medusa/config.py | chebelom/cassandra-medusa | 5afc3aaf9295466cd24b5d97f9f6e3455498be72 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import configparser
import logging
import os
import pathlib
import sys
import medusa.storage
import medusa.cassandra_utils
StorageConfig = collections.namedtuple(
'StorageConfig',
['bucket_name', 'key_file', 'prefix', 'fqdn', 'host_file_separator', 'storage_provider',
'base_path', 'max_backup_age', 'max_backup_count', 'api_profile', 'transfer_max_bandwidth',
'concurrent_transfers', 'multi_part_upload_threshold', 'host', 'port', 'secure']
)
CassandraConfig = collections.namedtuple(
'CassandraConfig',
['start_cmd', 'stop_cmd', 'config_file', 'cql_username', 'cql_password', 'check_running', 'is_ccm',
'sstableloader_bin', 'nodetool_username', 'nodetool_password', 'nodetool_password_file_path', 'nodetool_host',
'nodetool_port']
)
SSHConfig = collections.namedtuple(
'SSHConfig',
['username', 'key_file', 'port']
)
ChecksConfig = collections.namedtuple(
'ChecksConfig',
['health_check', 'query', 'expected_rows', 'expected_result']
)
MonitoringConfig = collections.namedtuple(
'MonitoringConfig',
['monitoring_provider']
)
MedusaConfig = collections.namedtuple(
'MedusaConfig',
['storage', 'cassandra', 'ssh', 'restore', 'monitoring', 'logging']
)
LoggingConfig = collections.namedtuple(
'LoggingConfig',
['enabled', 'file', 'format', 'level', 'maxBytes', 'backupCount']
)
DEFAULT_CONFIGURATION_PATH = pathlib.Path('/etc/medusa/medusa.ini')
def load_config(args, config_file):
config = configparser.ConfigParser(interpolation=None)
# Set defaults
config['storage'] = {
'host_file_separator': ',',
'max_backup_age': 0,
'max_backup_count': 0,
'api_profile': 'default',
'transfer_max_bandwidth': '50MB/s',
'concurrent_transfers': 1,
'multi_part_upload_threshold': 100 * 1024 * 1024,
'secure': True,
}
config['logging'] = {
'enabled': 'false',
'file': 'medusa.log',
'level': 'INFO',
'format': '[%(asctime)s] %(levelname)s: %(message)s',
'maxBytes': 20000000,
'backupCount': 50,
}
config['cassandra'] = {
'config_file': medusa.cassandra_utils.CassandraConfigReader.DEFAULT_CASSANDRA_CONFIG,
'start_cmd': 'sudo /etc/init.d/cassandra start',
'stop_cmd': 'sudo service cassandra stop',
'check_running': 'nodetool version',
'is_ccm': 0,
'sstableloader_bin': 'sstableloader'
}
config['ssh'] = {
'username': os.environ.get('USER') or '',
'key_file': '',
'port': 22
}
config['checks'] = {
'health_check': 'cql',
'query': '',
'expected_rows': '0',
'expected_result': ''
}
config['monitoring'] = {
'monitoring_provider': 'None'
}
if config_file:
logging.debug('Loading configuration from {}'.format(config_file))
config.read_file(config_file.open())
elif DEFAULT_CONFIGURATION_PATH.exists():
logging.debug('Loading configuration from {}'.format(DEFAULT_CONFIGURATION_PATH))
config.read_file(DEFAULT_CONFIGURATION_PATH.open())
else:
logging.error(
            'No configuration file provided via CLI, and no default file found in {}'.format(DEFAULT_CONFIGURATION_PATH)
)
sys.exit(1)
config.read_dict({'storage': {
key: value
for key, value in _zip_fields_with_arg_values(StorageConfig._fields, args)
if value is not None
}})
config.read_dict({'logging': {
key: value
for key, value in _zip_fields_with_arg_values(LoggingConfig._fields, args)
if value is not None
}})
config.read_dict({'ssh': {
key: value
for key, value in _zip_fields_with_arg_values(SSHConfig._fields, args)
if value is not None
}})
config.read_dict({'restore': {
key: value
for key, value in _zip_fields_with_arg_values(ChecksConfig._fields, args)
if value is not None
}})
config.read_dict({'monitoring': {
key: value
for key, value in _zip_fields_with_arg_values(MonitoringConfig._fields, args)
if value is not None
}})
medusa_config = MedusaConfig(
storage=_namedtuple_from_dict(StorageConfig, config['storage']),
cassandra=_namedtuple_from_dict(CassandraConfig, config['cassandra']),
ssh=_namedtuple_from_dict(SSHConfig, config['ssh']),
restore=_namedtuple_from_dict(ChecksConfig, config['checks']),
monitoring=_namedtuple_from_dict(MonitoringConfig, config['monitoring']),
logging=_namedtuple_from_dict(LoggingConfig, config['logging']),
)
for field in ['bucket_name', 'storage_provider']:
if getattr(medusa_config.storage, field) is None:
logging.error('Required configuration "{}" is missing in [storage] section.'.format(field))
sys.exit(2)
for field in ['start_cmd', 'stop_cmd']:
if getattr(medusa_config.cassandra, field) is None:
logging.error('Required configuration "{}" is missing in [cassandra] section.'.format(field))
sys.exit(2)
for field in ['username', 'key_file']:
if getattr(medusa_config.ssh, field) is None:
logging.error('Required configuration "{}" is missing in [ssh] section.'.format(field))
sys.exit(2)
return medusa_config
def _zip_fields_with_arg_values(fields, args):
return [(field, args[field]) for field in fields]
def evaluate_boolean(value):
# same behaviour as python's configparser
if value.lower() in ('0', 'false', 'no', 'off'):
return False
elif value.lower() in ('1', 'true', 'yes', 'on'):
return True
else:
raise TypeError('{} not a boolean'.format(value))
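# Illustrative behaviour of evaluate_boolean (not part of the original module):
#   evaluate_boolean('yes')   -> True
#   evaluate_boolean('off')   -> False
#   evaluate_boolean('maybe') -> raises TypeError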
def _namedtuple_from_dict(cls, data):
return cls(**{
field: data.get(field)
for field in cls._fields
})
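# Illustrative example (not part of the original module; values stay as the
# strings that configparser returns):
#   _namedtuple_from_dict(SSHConfig, {'username': 'cassandra', 'key_file': '', 'port': '22'})
#   -> SSHConfig(username='cassandra', key_file='', port='22')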
| 31.657005 | 120 | 0.654662 |
4a23435b1bb7abe7ad949d7dad9a9bd2b68044f8 | 160 | py | Python | submit_site/post_funcs.py | tai-korestate/budongsanbuddy | 8f4ddbcc2bc68c50394b62decee4882c7837fec6 | [
"MIT"
] | null | null | null | submit_site/post_funcs.py | tai-korestate/budongsanbuddy | 8f4ddbcc2bc68c50394b62decee4882c7837fec6 | [
"MIT"
] | null | null | null | submit_site/post_funcs.py | tai-korestate/budongsanbuddy | 8f4ddbcc2bc68c50394b62decee4882c7837fec6 | [
"MIT"
] | null | null | null | def upload_file(fil_name):
    with open(str(fil_name), "wb+") as destination:
        for chunk in fil_name.chunks():
destination.write(chunk)
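# Note: this mirrors Django's documented upload-handling pattern, in which the
# uploaded file object and the destination path are usually separate arguments,
# e.g. (illustrative sketch, names are assumptions):
#
#   def handle_uploaded_file(uploaded, dest_path):
#       with open(dest_path, "wb+") as destination:
#           for chunk in uploaded.chunks():
#               destination.write(chunk)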
| 26.666667 | 53 | 0.64375 |
4a2343e77d46624c6170f58774fcd3d0f75a665a | 5,887 | py | Python | aiomisc/entrypoint.py | dizballanze/aiomisc | 8c6444e4ffa56d82f1ba6947ad9e46a4020c4016 | [
"MIT"
] | 1 | 2019-03-07T11:13:30.000Z | 2019-03-07T11:13:30.000Z | aiomisc/entrypoint.py | dizballanze/aiomisc | 8c6444e4ffa56d82f1ba6947ad9e46a4020c4016 | [
"MIT"
] | null | null | null | aiomisc/entrypoint.py | dizballanze/aiomisc | 8c6444e4ffa56d82f1ba6947ad9e46a4020c4016 | [
"MIT"
] | null | null | null | import asyncio
import logging
import typing as t
from concurrent.futures._base import Executor
from .context import Context, get_context
from .log import LogFormat, basic_config
from .service import Service
from .signal import Signal
from .utils import create_default_event_loop, event_loop_policy
ExecutorType = Executor
class Entrypoint:
PRE_START = Signal()
POST_STOP = Signal()
async def _start(self) -> None:
if self.log_config:
basic_config(
level=self.log_level,
log_format=self.log_format,
buffered=True,
loop=self.loop,
buffer_size=self.log_buffer_size,
flush_interval=self.log_flush_interval,
)
for signal in (self.pre_start, self.post_stop):
signal.freeze()
await self.pre_start.call(entrypoint=self, services=self.services)
await asyncio.gather(
*[self._start_service(svc) for svc in self.services],
)
def __init__(
self, *services: Service, loop: asyncio.AbstractEventLoop = None,
pool_size: int = None,
log_level: t.Union[int, str] = logging.INFO,
log_format: t.Union[str, LogFormat] = "color",
log_buffer_size: int = 1024,
log_flush_interval: float = 0.2,
log_config: bool = True,
policy: asyncio.AbstractEventLoopPolicy = event_loop_policy,
debug: bool = False
):
"""
        :param debug: enable debug mode on the event loop
        :param loop: event loop to run the services on
        :param services: Service instances which will be started.
        :param pool_size: thread pool size
        :param log_level: Logging level which will be configured
        :param log_format: Logging format which will be configured
:param log_buffer_size: Buffer size for logging
:param log_flush_interval: interval in seconds for flushing logs
:param log_config: if False do not configure logging
"""
self._debug = debug
self._loop = loop
self._loop_owner = False
self._thread_pool = None # type: t.Optional[ExecutorType]
self.ctx = None # type: t.Optional[Context]
self.log_buffer_size = log_buffer_size
self.log_config = log_config
self.log_flush_interval = log_flush_interval
self.log_format = log_format
self.log_level = log_level
self.policy = policy
self.pool_size = pool_size
self.services = services
self.shutting_down = False
self.pre_start = self.PRE_START.copy()
self.post_stop = self.POST_STOP.copy()
self._closing = None # type: t.Optional[asyncio.Event]
if self.log_config:
basic_config(
level=self.log_level,
log_format=self.log_format,
buffered=False,
)
async def closing(self) -> None:
# Lazy initialization because event loop might be not exists
if self._closing is None:
self._closing = asyncio.Event()
await self._closing.wait()
@property
def loop(self) -> asyncio.AbstractEventLoop:
if self._loop is None:
self._loop, self._thread_pool = create_default_event_loop(
pool_size=self.pool_size,
policy=self.policy,
debug=self._debug,
)
self._loop_owner = True
return self._loop
def __del__(self) -> None:
if self._loop and self._loop.is_closed():
return
if self._loop_owner and self._loop is not None:
self._loop.close()
def __enter__(self) -> asyncio.AbstractEventLoop:
self.loop.run_until_complete(self.__aenter__())
return self.loop
def __exit__(
self, exc_type: t.Any, exc_val: t.Any, exc_tb: t.Any,
) -> None:
if self.loop.is_closed():
return
self.loop.run_until_complete(
self.__aexit__(exc_type, exc_val, exc_tb),
)
if self._loop_owner and self._loop is not None:
self._loop.close()
async def __aenter__(self) -> "Entrypoint":
if self._loop is None:
# When __aenter__ called without __enter__
self._loop = asyncio.get_event_loop()
self.ctx = Context(loop=self.loop)
await self._start()
return self
async def __aexit__(
self, exc_type: t.Any, exc_val: t.Any, exc_tb: t.Any
) -> None:
try:
if self.loop.is_closed():
return
await self.graceful_shutdown(exc_val)
self.shutting_down = True
finally:
if self.ctx:
self.ctx.close()
if self._thread_pool:
self._thread_pool.shutdown()
async def _start_service(
self, svc: Service
) -> None:
svc.set_loop(self.loop)
start_task, ev_task = map(
asyncio.ensure_future, (
svc.start(), svc.start_event.wait(),
),
)
await asyncio.wait(
(start_task, ev_task),
return_when=asyncio.FIRST_COMPLETED,
)
self.loop.call_soon(svc.start_event.set)
await ev_task
if start_task.done():
await start_task
return
return None
async def graceful_shutdown(self, exception: Exception) -> None:
if self._closing:
self._closing.set()
tasks = [
asyncio.shield(svc.stop(exception)) for svc in self.services
]
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
await self.post_stop.call(entrypoint=self)
await self.loop.shutdown_asyncgens()
entrypoint = Entrypoint
__all__ = ("entrypoint", "Entrypoint", "get_context")
| 28.857843 | 74 | 0.597758 |
4a23441477b2ba3b04c45b611a7f606bfe635d3f | 7,022 | py | Python | acloud/_internal.py | rfc2119/acloud-dl | a17f1c4c31c48b64f23d8bd29d88e228489322f7 | [
"MIT"
] | null | null | null | acloud/_internal.py | rfc2119/acloud-dl | a17f1c4c31c48b64f23d8bd29d88e228489322f7 | [
"MIT"
] | null | null | null | acloud/_internal.py | rfc2119/acloud-dl | a17f1c4c31c48b64f23d8bd29d88e228489322f7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2018 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import sys
import time
from ._colorized import *
from ._extract import CloudGuru
from ._shared import (
CloudGuruCourses,
CloudGuruCourseDownload,
CloudGuruCourse,
CloudGuruChapters,
CloudGuruLectures,
CloudLectureSubtitles,
CloudGuruLectureStreams,
CloudGuruLectureLectureAssets
)
from ._getpass import GetPass
class InternCloudGuruCourses(CloudGuruCourses, CloudGuru, GetPass):
def __init__(self, *args, **kwargs):
self._info = ''
super(InternCloudGuruCourses, self).__init__(*args, **kwargs)
def _fetch_courses(self):
if self._have_basic_courses:
return
if self._cookies:
auth = self._login(cookies=self._cookies)
if auth.get('login') == 'successful':
courses = self._extract_accessible_courses()
self._courses = [InternCloudGuruCourseDownload(c, self) for c in courses]
self._have_basic_courses = True
class InternCloudGuruCourseDownload(CloudGuruCourseDownload):
def __init__(self, course, parent):
self._info = course
self._session = parent._session
super(InternCloudGuruCourseDownload, self).__init__()
self._id = self._info.get("uniqueid")
self._title = self._info.get("title")
def _process_course(self, keep_alive):
self._course = InternCloudGuruCourse(self._info, self._session, keep_alive)
class InternCloudGuruCourse(CloudGuruCourse, CloudGuru):
def __init__(self, course, session, keep_alive):
self._info = ''
self._course = course
self._session = session
self._keep_alive = keep_alive
super(InternCloudGuruCourse, self).__init__()
def _fetch_course(self):
if self._have_basic:
return
course_id = self._course.get("uniqueid")
self._info = self._real_extract(course_id=course_id)
self._id = self._info['course_id']
self._url = self._info['course_url']
self._title = self._info['course_title']
self._chapters_count = self._info['total_chapters']
self._total_lectures = self._info['total_lectures']
self._chapters = [InternCloudGuruChapter(z) for z in self._info['chapters']]
if not self._keep_alive:
self._logout()
self._have_basic = True
class InternCloudGuruChapter(CloudGuruChapters):
def __init__(self, chapter):
super(InternCloudGuruChapter, self).__init__()
self._chapter_id = chapter['chapter_id']
self._chapter_title = chapter['chapter_title']
self._chapter_index = chapter['chapter_index']
self._lectures_count = chapter['lectures_count']
self._lectures = [InternCloudGuruLecture(z) for z in chapter['lectures']]
class InternCloudGuruLecture(CloudGuruLectures):
def __init__(self, lectures):
super(InternCloudGuruLecture, self).__init__()
self._info = lectures
self._lecture_id = self._info['lecture_id']
self._lecture_title = self._info['lecture_title']
self._lecture_index = self._info['lecture_index']
self._sources_count = self._info['sources_count']
self._assets_count = self._info['assets_count']
self._extension = self._info.get('extension') or None
self._duration = self._info.get('duration') or None
if self._duration:
duration = int(self._duration)
(mins, secs) = divmod(duration, 60)
(hours, mins) = divmod(mins, 60)
if hours == 0:
self._duration = "%02d:%02d" % (mins, secs)
else:
self._duration = "%02d:%02d:%02d" % (hours, mins, secs)
def _process_streams(self):
streams = [InternCloudGuruLectureStream(z, self) for z in self._info['sources']] if self._sources_count > 0 else []
self._streams = streams
def _process_assets(self):
assets = [InternCloudGuruLectureAssets(z, self) for z in self._info['assets']] if self._assets_count > 0 else []
self._assets = assets
def _process_subtitles(self):
subtitles = InternCloudLectureSubtitles(self._info['subtitle_url'], self) if self._info['subtitle_url'] else ""
self._subtitle = subtitles
class InternCloudGuruLectureStream(CloudGuruLectureStreams):
def __init__(self, sources, parent):
super(InternCloudGuruLectureStream, self).__init__(parent)
self._mediatype = sources.get('type')
self._extension = sources.get('extension')
height = sources.get('height') or 0
width = sources.get('width') or 0
self._resolution = '%sx%s' % (width, height)
self._dimention = width, height
self._quality = self._resolution
self._url = sources.get('url')
self._path = sources.get('path')
self._fsize = sources.get('size')
class InternCloudGuruLectureAssets(CloudGuruLectureLectureAssets):
def __init__(self, assets, parent):
super(InternCloudGuruLectureAssets, self).__init__(parent)
self._mediatype = assets.get('type')
self._extension = assets.get('extension')
self._title = '{0:03d} {1!s}'.format(parent._lecture_index, assets.get('filename'))
self._url = assets.get('url')
class InternCloudLectureSubtitles(CloudLectureSubtitles):
def __init__(self, subtitle_url, parent):
super(InternCloudLectureSubtitles, self).__init__(parent)
self._mediatype = "sub"
self._extension = "vtt"
self._language = "en"
self._url = subtitle_url | 39.011111 | 168 | 0.660496 |
4a23442f436456b900bf2b2e1e9c5799f4c8936d | 2,028 | py | Python | leather/scales/ordinal.py | nickromano/django-daily-digest | 8f9a289d772cfd6b6c72536dd40c2012516b9d28 | [
"MIT"
] | 6 | 2019-03-02T09:16:12.000Z | 2021-08-17T13:54:49.000Z | leather/scales/ordinal.py | nickromano/django-daily-digest | 8f9a289d772cfd6b6c72536dd40c2012516b9d28 | [
"MIT"
] | 66 | 2018-01-04T07:25:13.000Z | 2022-03-29T09:19:09.000Z | leather/scales/ordinal.py | nickromano/django-daily-digest | 8f9a289d772cfd6b6c72536dd40c2012516b9d28 | [
"MIT"
] | 2 | 2019-09-03T09:35:44.000Z | 2021-12-28T15:29:13.000Z | #!/usr/bin/env python
from decimal import Decimal
from leather.scales.base import Scale
class Ordinal(Scale):
"""
A scale that maps individual values (e.g. strings) to a range.
"""
def __init__(self, domain):
self._domain = domain
def contains(self, v):
"""
Return :code:`True` if a given value is contained within this scale's
displayed domain.
"""
return v in self._domain
def project(self, value, range_min, range_max):
"""
Project a value in this scale's domain to a target range.
"""
range_min = Decimal(range_min)
range_max = Decimal(range_max)
segments = len(self._domain)
segment_size = (range_max - range_min) / segments
try:
pos = (
range_min
+ (self._domain.index(value) * segment_size)
+ (segment_size / 2)
)
except ValueError:
raise ValueError(
'Value "%s" is not present in Ordinal scale domain' % value
)
return pos
def project_interval(self, value, range_min, range_max):
"""
Project a value in this scale's domain to an interval in the target
        range. This is used for placing :class:`.Bars` and :class:`.Columns`.
"""
range_min = Decimal(range_min)
range_max = Decimal(range_max)
segments = len(self._domain)
segment_size = (range_max - range_min) / segments
gap = segment_size / Decimal(20)
try:
a = range_min + (self._domain.index(value) * segment_size) + gap
b = range_min + ((self._domain.index(value) + 1) * segment_size) - gap
except ValueError:
raise ValueError(
'Value "%s" is not present in Ordinal scale domain' % value
)
return (a, b)
def ticks(self):
"""
Generate a series of ticks for this scale.
"""
return self._domain
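# Illustrative usage (not part of the original module):
#   scale = Ordinal(['a', 'b', 'c'])
#   scale.project('b', 0, 300)           # -> Decimal('150'), the midpoint of the middle segment
#   scale.project_interval('a', 0, 300)  # -> (Decimal('5'), Decimal('95')), a 5% gap on each side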
| 27.780822 | 82 | 0.560651 |