content stringlengths 7–928k | avg_line_length float64 3.5–33.8k | max_line_length int64 6–139k | alphanum_fraction float64 0.08–0.96 | licenses sequence | repository_name stringlengths 7–104 | path stringlengths 4–230 | size int64 7–928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
import time
import pyperclip
import csv
import subprocess
import serial
ser = serial.Serial('/dev/cu.usbmodemFD131', baudrate=9600, timeout=None)
clipboard_old = pyperclip.paste()
musicFile = "music/yes_1.mp3"
musicFile_rick = "music/rickroll.mp3"
failText = "Fail. No, bubbles, for you."
rickText = "Fail. But don't worry. I'm never, gonna give you up."
#local information
def check_status(bar=1,bulge=0):
numFails = 0
clipboard_old = pyperclip.paste()
while True:
clipboard = pyperclip.paste()
if (clipboard != clipboard_old):
print "New ID!",clipboard
clipboard_old = clipboard
#Load data object for that classification
# Have lookup table of the form id, bar, bulge where bar&bulge are out of 1,0
classification=read_object_classification(clipboard_old) #in the form [id,bar,bulge]
#classification=['1ds4',1,0] #example of a barred galaxy without a bulge
print "Galaxy data",classification,"Location data",bar,bulge
status=bar==classification[1] and bulge==classification[2]
if status:
print "Success :) Do the things!"
ser.write('1\n')
return_code = subprocess.call(["afplay", musicFile])
ser.write('0\n')
time.sleep(0.5)
ser.write('M\n')
time.sleep(8)
ser.write('N\n')
else:
numFails += 1
if (numFails%5 != 0):
print "Fail :( No bubbles for you"
return_code = subprocess.call(["say", failText])
else:
print "Fail :( No bubbles for you, but here's a Rickroll anyway..."
return_code = subprocess.call(["say", rickText])
#ser.write('1\n')
return_code = subprocess.call(["afplay", musicFile_rick])
#ser.write('0\n')
print '-------------'
time.sleep(0.5)
headers={'Content-Type':'application/json','Accept':'application/vnd.api+json; version=1'}
def read_object_classification(clipboard_old):
filename="classification_data.csv"
with open(filename) as f:
reader=csv.reader(f,delimiter=',')
next(reader)
for row in reader:
if row[0]==str(clipboard_old):
row=[int(item) for item in row]
return row
print "Id not found. Return dummy data"
return ['0000000',2,2]
def write_example_file():
filename="classification_data.csv"
IDS=['1243233','2345473','2233432','9987679','3345363','3934322']
bulge=[0,0,0,1,1,1]
bar=[1,0,0,1,0,1]
with open(filename,'w') as f:
writer=csv.writer(f)
writer.writerow(['Id','bar','bulge'])
for i in range(len(IDS)):
writer.writerow([IDS[i],bar[i],bulge[i]])
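# Illustrative sketch (not part of the original script): after running
# write_example_file(), classification_data.csv is expected to contain rows
# in the [Id, bar, bulge] order assumed by read_object_classification(), e.g.
#   Id,bar,bulge
#   1243233,1,0
#   2345473,0,0
# so check_status(bar=1, bulge=0) would report success when '1243233' is
# copied to the clipboard.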
| 32.163043 | 98 | 0.56438 | [
"MIT"
] | chrislintott/GZMaze | qrcodetoclassification.py | 2,959 | Python |
import numpy as np
import pytest
from astropy.cosmology import default_cosmology
from skypy.linear.eisenstein_hu import power_spectrum
def test_eisenstein_hu():
""" Test Eisenstein & Hu Linear matter power spectrum with
and without wiggles using astropy default cosmology"""
cosmology = default_cosmology.get()
A_s = 2.1982e-09
n_s = 0.969453
kwmap = 0.02
# Test that a scalar input gives a scalar output
scalar_input = 1
scalar_output_w = power_spectrum(scalar_input, A_s, n_s, cosmology, kwmap,
wiggle=True)
scalar_output_nw = power_spectrum(scalar_input, A_s, n_s, cosmology, kwmap,
wiggle=False)
assert np.isscalar(scalar_output_w)
assert np.isscalar(scalar_output_nw)
# Test that an array input gives an array output
array_shape = (10,)
array_input = np.random.uniform(size=array_shape)
array_output_w = power_spectrum(array_input, A_s, n_s, cosmology, kwmap,
wiggle=True)
array_output_nw = power_spectrum(array_input, A_s, n_s, cosmology, kwmap,
wiggle=False)
assert array_output_w.shape == array_shape
assert array_output_nw.shape == array_shape
# Test pk against precomputed values for default_cosmology
wavenumber = np.logspace(-3, 1, num=5, base=10.0)
pk_eisensteinhu_w = power_spectrum(wavenumber, A_s, n_s, cosmology, kwmap,
wiggle=True)
pk_eisensteinhu_nw = power_spectrum(wavenumber, A_s, n_s, cosmology, kwmap,
wiggle=False)
pk_cosmosis_w = np.array([6.47460158e+03, 3.71610099e+04, 9.65702614e+03,
1.14604456e+02, 3.91399918e-01])
pk_cosmosis_nw = np.array([6.47218600e+03, 3.77330704e+04, 1.00062077e+04,
1.13082980e+02, 3.83094714e-01])
assert np.allclose(pk_eisensteinhu_w, pk_cosmosis_w)
assert np.allclose(pk_eisensteinhu_nw, pk_cosmosis_nw)
# Test for failure when wavenumber <= 0
negative_wavenumber_scalar = 0
with pytest.raises(ValueError):
power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology, kwmap,
wiggle=True)
with pytest.raises(ValueError):
power_spectrum(negative_wavenumber_scalar, A_s, n_s, cosmology, kwmap,
wiggle=False)
negative_wavenumber_array = [0, 1, -2, 3]
with pytest.raises(ValueError):
power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology, kwmap,
wiggle=True)
with pytest.raises(ValueError):
power_spectrum(negative_wavenumber_array, A_s, n_s, cosmology, kwmap,
wiggle=False)
| 44.349206 | 79 | 0.644596 | [
"BSD-3-Clause"
] | Lucia-Fonseca/skypy | skypy/linear/tests/test_eisenstein_hu.py | 2,794 | Python |
"""Contains the CLI."""
import sys
import json
import logging
import oyaml as yaml
import click
# For the profiler
import pstats
from io import StringIO
# To enable colour cross platform
import colorama
from sqlfluff.cli.formatters import (
format_rules,
format_violation,
format_linting_result_header,
format_linting_stats,
colorize,
format_dialect_warning,
format_dialects,
CallbackFormatter,
)
from sqlfluff.cli.helpers import cli_table, get_package_version
# Import from sqlfluff core.
from sqlfluff.core import (
Linter,
FluffConfig,
SQLLintError,
dialect_selector,
dialect_readout,
TimingSummary,
)
class RedWarningsFilter(logging.Filter):
"""This filter makes all warnings or above red."""
def filter(self, record):
"""Filter any warnings (or above) to turn them red."""
if record.levelno >= logging.WARNING:
record.msg = colorize(record.msg, "red") + " "
return True
def set_logging_level(verbosity, logger=None, stderr_output=False):
"""Set up logging for the CLI.
We either set up global logging based on the verbosity
or, if `logger` is specified, we only limit to a single
sqlfluff logger. Verbosity is applied in the same way.
Implementation: If `logger` is not specified, the handler
is attached to the `sqlfluff` logger. If it is specified
then it attaches to the logger in question. In addition
if `logger` is specified, then that logger will also
not propagate.
"""
fluff_logger = logging.getLogger("sqlfluff")
# Don't propagate logging
fluff_logger.propagate = False
# Enable colorama
colorama.init()
# Set up the log handler to log to stdout
handler = logging.StreamHandler(stream=sys.stderr if stderr_output else sys.stdout)
# NB: the unicode character at the beginning is to squash any badly
# tamed ANSI colour statements, and return us to normality.
handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s"))
# Set up a handler to colour warnings red.
handler.addFilter(RedWarningsFilter())
if logger:
focus_logger = logging.getLogger("sqlfluff.{0}".format(logger))
focus_logger.addHandler(handler)
else:
fluff_logger.addHandler(handler)
# NB: We treat the parser logger slightly differently because it's noisier.
# It's important that we set levels for all loggers each time so
# that we don't break tests by changing the granularity
# between tests.
parser_logger = logging.getLogger("sqlfluff.parser")
if verbosity < 3:
fluff_logger.setLevel(logging.WARNING)
parser_logger.setLevel(logging.NOTSET)
elif verbosity == 3:
fluff_logger.setLevel(logging.INFO)
parser_logger.setLevel(logging.WARNING)
elif verbosity == 4:
fluff_logger.setLevel(logging.DEBUG)
parser_logger.setLevel(logging.INFO)
elif verbosity > 4:
fluff_logger.setLevel(logging.DEBUG)
parser_logger.setLevel(logging.DEBUG)
def common_options(f):
"""Add common options to commands via a decorator.
These are applied to all of the cli commands.
"""
f = click.version_option()(f)
f = click.option(
"-v",
"--verbose",
count=True,
help=(
"Verbosity, how detailed should the output be. This is *stackable*, so `-vv`"
" is more verbose than `-v`. For the most verbose option try `-vvvv` or `-vvvvv`."
),
)(f)
f = click.option(
"-n",
"--nocolor",
is_flag=True,
help="No color - if this is set then the output will be without ANSI color codes.",
)(f)
return f
def core_options(f):
"""Add core operation options to commands via a decorator.
These are applied to the main (but not all) cli commands like
`parse`, `lint` and `fix`.
"""
f = click.option(
"--dialect", default=None, help="The dialect of SQL to lint (default=ansi)"
)(f)
f = click.option(
"--templater", default=None, help="The templater to use (default=jinja)"
)(f)
f = click.option(
"--rules",
default=None,
# short_help='Specify a particular rule, or comma separated rules, to check',
help=(
"Narrow the search to only specific rules. For example "
"specifying `--rules L001` will only search for rule `L001` (Unnecessary "
"trailing whitespace). Multiple rules can be specified with commas e.g. "
"`--rules L001,L002` will specify only looking for violations of rule "
"`L001` and rule `L002`."
),
)(f)
f = click.option(
"--exclude-rules",
default=None,
# short_help='Specify a particular rule, or comma separated rules to exclude',
help=(
"Exclude specific rules. For example "
"specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary "
"trailing whitespace) from the set of considered rules. This could either "
"be the whitelist, or the general set if there is no specific whitelist. "
"Multiple rules can be specified with commas e.g. "
"`--exclude-rules L001,L002` will exclude violations of rule "
"`L001` and rule `L002`."
),
)(f)
f = click.option(
"--ignore",
default=None,
help=(
"Ignore particular families of errors so that they don't cause a failed "
"run. For example `--ignore parsing` would mean that any parsing errors "
"are ignored and don't influence the success or fail of a run. Multiple "
"options are possible if comma separated e.g. `--ignore parsing,templating`."
),
)(f)
f = click.option(
"--bench",
is_flag=True,
help="Set this flag to engage the benchmarking tool output.",
)(f)
f = click.option(
"--logger",
type=click.Choice(["parser", "linter", "rules"], case_sensitive=False),
help="Choose to limit the logging to one of the loggers.",
)(f)
return f
def get_config(**kwargs):
"""Get a config object from kwargs."""
if kwargs.get("dialect", None):
try:
# We're just making sure it exists at this stage - it will be fetched properly in the linter
dialect_selector(kwargs["dialect"])
except KeyError:
click.echo("Error: Unknown dialect {0!r}".format(kwargs["dialect"]))
sys.exit(66)
# Instantiate a config object (filtering out the nulls)
overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None}
return FluffConfig.from_root(overrides=overrides)
def get_linter_and_formatter(cfg, silent=False):
"""Get a linter object given a config."""
try:
# We're just making sure it exists at this stage - it will be fetched properly in the linter
dialect_selector(cfg.get("dialect"))
except KeyError:
click.echo("Error: Unknown dialect {0!r}".format(cfg.get("dialect")))
sys.exit(66)
if not silent:
# Instantiate the linter and return (with an output function)
formatter = CallbackFormatter(
callback=lambda m: click.echo(m, color=cfg.get("color")),
verbosity=cfg.get("verbose"),
output_line_length=cfg.get("output_line_length"),
)
return Linter(config=cfg, formatter=formatter), formatter
else:
# Instantiate the linter and return. NB: No formatter
# in the Linter and a black formatter otherwise.
formatter = CallbackFormatter(callback=lambda m: None, verbosity=0)
return Linter(config=cfg), formatter
@click.group()
@click.version_option()
def cli():
"""Sqlfluff is a modular sql linter for humans."""
@cli.command()
@common_options
def version(**kwargs):
"""Show the version of sqlfluff."""
c = get_config(**kwargs)
if c.get("verbose") > 0:
# Instantiate the linter
lnt, formatter = get_linter_and_formatter(c)
# Dispatch the detailed config from the linter.
formatter.dispatch_config(lnt)
else:
# Otherwise just output the package version.
click.echo(get_package_version(), color=c.get("color"))
@cli.command()
@common_options
def rules(**kwargs):
"""Show the current rules in use."""
c = get_config(**kwargs)
lnt, _ = get_linter_and_formatter(c)
click.echo(format_rules(lnt), color=c.get("color"))
@cli.command()
@common_options
def dialects(**kwargs):
"""Show the current dialects available."""
c = get_config(**kwargs)
click.echo(format_dialects(dialect_readout), color=c.get("color"))
@cli.command()
@common_options
@core_options
@click.option(
"-f",
"--format",
"format",
default="human",
type=click.Choice(["human", "json", "yaml"], case_sensitive=False),
help="What format to return the lint result in.",
)
@click.option(
"--nofail",
is_flag=True,
help=(
"If set, the exit code will always be zero, regardless of violations "
"found. This is potentially useful during rollout."
),
)
@click.option(
"--disregard-sqlfluffignores",
is_flag=True,
help=("Perform the operation regardless of .sqlfluffignore configurations"),
)
@click.option(
"-p",
"--parallel",
type=int,
default=1,
help="If set to a value higher than 1, run SQLFluff in parallel, "
"speeding up processing.",
)
@click.argument("paths", nargs=-1)
def lint(
paths,
parallel,
format,
nofail,
disregard_sqlfluffignores,
logger=None,
bench=False,
**kwargs,
):
"""Lint SQL files via passing a list of files or using stdin.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
Linting SQL files:
sqlfluff lint path/to/file.sql
sqlfluff lint directory/of/sql/files
Linting a file via stdin (note the lone '-' character):
cat path/to/file.sql | sqlfluff lint -
echo 'select col from tbl' | sqlfluff lint -
"""
c = get_config(**kwargs)
non_human_output = format in ("json", "yaml")
lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
verbose = c.get("verbose")
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
# add stdin if specified via lone '-'
if ("-",) == paths:
result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin")
else:
# Output the results as we go
if verbose >= 1:
click.echo(format_linting_result_header())
try:
result = lnt.lint_paths(
paths,
ignore_non_existent_files=False,
ignore_files=not disregard_sqlfluffignores,
parallel=parallel,
)
except IOError:
click.echo(
colorize(
"The path(s) {0!r} could not be accessed. Check it/they exist(s).".format(
paths
),
"red",
)
)
sys.exit(1)
# Output the final stats
if verbose >= 1:
click.echo(format_linting_stats(result, verbose=verbose))
if format == "json":
click.echo(json.dumps(result.as_records()))
elif format == "yaml":
click.echo(yaml.dump(result.as_records()))
if bench:
click.echo("==== overall timings ====")
timing_summary = result.timing_summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
if not nofail:
if not non_human_output:
click.echo("All Finished 📜 🎉!")
sys.exit(result.stats()["exit code"])
else:
sys.exit(0)
def do_fixes(lnt, result, formatter=None, **kwargs):
"""Actually do the fixes."""
click.echo("Persisting Changes...")
res = result.persist_changes(formatter=formatter, **kwargs)
if all(res.values()):
click.echo("Done. Please check your files to confirm.")
return True
# If some failed then return false
click.echo("Done. Some operations failed. Please check your files to confirm.")
click.echo("Some errors cannot be fixed or there is another error blocking it.")
return False
@cli.command()
@common_options
@core_options
@click.option(
"-f",
"--force",
is_flag=True,
help=(
"skip the confirmation prompt and go straight to applying "
"fixes. **Use this with caution.**"
),
)
@click.option(
"--fixed-suffix", default=None, help="An optional suffix to add to fixed files."
)
@click.option(
"--parallel",
type=int,
default=1,
help="If set to a value higher than 1, run SQLFluff in parallel, "
"speeding up processing.",
)
@click.argument("paths", nargs=-1)
def fix(force, paths, parallel, bench=False, fixed_suffix="", logger=None, **kwargs):
"""Fix SQL files.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
"""
# some quick checks
fixing_stdin = ("-",) == paths
c = get_config(**kwargs)
lnt, formatter = get_linter_and_formatter(c, silent=fixing_stdin)
verbose = c.get("verbose")
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=fixing_stdin)
# handle stdin case. should output formatted sql to stdout and nothing else.
if fixing_stdin:
stdin = sys.stdin.read()
result = lnt.lint_string_wrapped(stdin, fname="stdin", fix=True)
stdout = result.paths[0].files[0].fix_string()[0]
click.echo(stdout, nl=False)
sys.exit()
# Lint the paths (not with the fix argument at this stage), outputting as we go.
click.echo("==== finding fixable violations ====")
try:
result = lnt.lint_paths(
paths, fix=True, ignore_non_existent_files=False, parallel=parallel
)
except IOError:
click.echo(
colorize(
"The path(s) {0!r} could not be accessed. Check it/they exist(s).".format(
paths
),
"red",
)
)
sys.exit(1)
# NB: We filter to linting violations here, because they're
# the only ones which can be potentially fixed.
if result.num_violations(types=SQLLintError, fixable=True) > 0:
click.echo("==== fixing violations ====")
click.echo(
"{0} fixable linting violations found".format(
result.num_violations(types=SQLLintError, fixable=True)
)
)
if force:
click.echo(colorize("FORCE MODE", "red") + ": Attempting fixes...")
success = do_fixes(
lnt,
result,
formatter,
types=SQLLintError,
fixed_file_suffix=fixed_suffix,
)
if not success:
sys.exit(1)
else:
click.echo(
"Are you sure you wish to attempt to fix these? [Y/n] ", nl=False
)
c = click.getchar().lower()
click.echo("...")
if c in ("y", "\r", "\n"):
click.echo("Attempting fixes...")
success = do_fixes(
lnt,
result,
formatter,
types=SQLLintError,
fixed_file_suffix=fixed_suffix,
)
if not success:
sys.exit(1)
else:
click.echo("All Finished 📜 🎉!")
elif c == "n":
click.echo("Aborting...")
else:
click.echo("Invalid input, please enter 'Y' or 'N'")
click.echo("Aborting...")
else:
click.echo("==== no fixable linting violations found ====")
if result.num_violations(types=SQLLintError, fixable=False) > 0:
click.echo(
" [{0} unfixable linting violations found]".format(
result.num_violations(types=SQLLintError, fixable=False)
)
)
click.echo("All Finished 📜 🎉!")
if bench:
click.echo("==== overall timings ====")
timing_summary = result.timing_summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
sys.exit(0)
def quoted_presenter(dumper, data):
"""Re-presenter which always double quotes string values needing escapes."""
if "\n" in data or "\t" in data or "'" in data:
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"')
else:
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="")
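# Illustrative note (assumption, not from the original source): once registered
# via yaml.add_representer(str, quoted_presenter) below, a value such as "it's"
# or "a\nb" is emitted double-quoted, while a plain value like "abc" keeps the
# default unquoted style.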
@cli.command()
@common_options
@core_options
@click.argument("path", nargs=1)
@click.option(
"--recurse", default=0, help="The depth to recursively parse to (0 for unlimited)"
)
@click.option(
"-c",
"--code-only",
is_flag=True,
help="Output only the code elements of the parse tree.",
)
@click.option(
"-f",
"--format",
default="human",
type=click.Choice(["human", "json", "yaml"], case_sensitive=False),
help="What format to return the parse result in.",
)
@click.option(
"--profiler", is_flag=True, help="Set this flag to engage the python profiler."
)
@click.option(
"--nofail",
is_flag=True,
help=(
"If set, the exit code will always be zero, regardless of violations "
"found. This is potentially useful during rollout."
),
)
def parse(path, code_only, format, profiler, bench, nofail, logger=None, **kwargs):
"""Parse SQL files and just spit out the result.
PATH is the path to a sql file or directory to lint. This can be either a
file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
be interpreted like passing the current working directory as a path argument.
"""
c = get_config(**kwargs)
# We don't want anything else to be logged if we want json or yaml output
non_human_output = format in ("json", "yaml")
lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
verbose = c.get("verbose")
recurse = c.get("recurse")
formatter.dispatch_config(lnt)
# Set up logging.
set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)
# TODO: do this better
nv = 0
if profiler:
# Set up the profiler if required
try:
import cProfile
except ImportError:
click.echo("The cProfiler is not available on your platform.")
sys.exit(1)
pr = cProfile.Profile()
pr.enable()
try:
# handle stdin if specified via lone '-'
if "-" == path:
# put the parser result in a list to iterate later
result = [
lnt.parse_string(
sys.stdin.read(), "stdin", recurse=recurse, config=lnt.config
),
]
else:
# A single path must be specified for this command
result = lnt.parse_path(path, recurse=recurse)
# iterative print for human readout
if format == "human":
timing = TimingSummary()
for parsed_string in result:
timing.add(parsed_string.time_dict)
if parsed_string.tree:
click.echo(parsed_string.tree.stringify(code_only=code_only))
else:
# TODO: Make this prettier
click.echo("...Failed to Parse...")
nv += len(parsed_string.violations)
if parsed_string.violations:
click.echo("==== parsing violations ====")
for v in parsed_string.violations:
click.echo(format_violation(v))
if (
parsed_string.violations
and parsed_string.config.get("dialect") == "ansi"
):
click.echo(format_dialect_warning())
if verbose >= 2:
click.echo("==== timings ====")
click.echo(cli_table(parsed_string.time_dict.items()))
if verbose >= 2 or bench:
click.echo("==== overall timings ====")
timing_summary = timing.summary()
for step in timing_summary:
click.echo(f"=== {step} ===")
click.echo(cli_table(timing_summary[step].items()))
else:
# collect result and print as single payload
# will need to zip in the file paths
filepaths = ["stdin"] if "-" == path else lnt.paths_from_path(path)
result = [
dict(
filepath=filepath,
segments=parsed.as_record(code_only=code_only, show_raw=True)
if parsed
else None,
)
for filepath, (parsed, _, _, _, _) in zip(filepaths, result)
]
if format == "yaml":
# For yaml dumping always dump double quoted strings if they contain tabs or newlines.
yaml.add_representer(str, quoted_presenter)
click.echo(yaml.dump(result))
elif format == "json":
click.echo(json.dumps(result))
except IOError:
click.echo(
colorize(
"The path {0!r} could not be accessed. Check it exists.".format(path),
"red",
)
)
sys.exit(1)
if profiler:
pr.disable()
profiler_buffer = StringIO()
ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative")
ps.print_stats()
click.echo("==== profiler stats ====")
# Only print the first 50 lines of it
click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50]))
if nv > 0 and not nofail:
sys.exit(66)
else:
sys.exit(0)
# This "__main__" handler allows invoking SQLFluff using "python -m", which
# simplifies the use of cProfile, e.g.:
# python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql
if __name__ == "__main__":
cli.main(sys.argv[1:])
| 33.644928 | 104 | 0.598148 | [
"MIT"
] | tmastny/sqlfluff | src/sqlfluff/cli/commands.py | 23,233 | Python |
from .__init__ import *
def compoundInterestFunc(maxPrinciple=10000,
maxRate=10,
maxTime=10,
maxPeriod=10):
p = random.randint(100, maxPrinciple)
r = random.randint(1, maxRate)
t = random.randint(1, maxTime)
n = random.randint(1, maxPeriod)
A = p * (1 + r / (100 * n)) ** (n * t)
problem = "Compound Interest for a principle amount of " + str(
p) + " dollars, " + str(
r) + "% rate of interest and for a time period of " + str(
t) + " compounded monthly is = "
solution = round(A, 2)
return problem, solution
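# Worked example (illustrative, not part of the original generator): using the
# standard compound-interest formula A = p * (1 + r / (100 * n)) ** (n * t),
# p=1000, r=10, t=2, n=1 gives A = 1000 * 1.1 ** 2 = 1210.0, so the returned
# solution would be round(1210.0, 2) == 1210.0.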
| 34.526316 | 70 | 0.525915 | [
"MIT"
] | anshitabaid/mathgenerator | mathgenerator/funcs/compoundInterestFunc.py | 656 | Python |
import tensorflow as tf
import numpy as np
def body(x):
a = tf.random_uniform(shape=[2, 2], dtype=tf.int32, maxval=100)
b = tf.constant(np.array([[1, 2], [3, 4]]), dtype=tf.int32)
c = a + b
return tf.nn.relu(x + c)
def condition(x):
return tf.reduce_sum(x) < 100
x = tf.Variable(tf.constant(0, shape=[2, 2]))
with tf.Session():
tf.global_variables_initializer().run()
result = tf.while_loop(condition, body, [x])
print(result.eval())
| 24.947368 | 67 | 0.628692 | [
"MIT"
] | babaozhouy5/tensorflow_learning | 08_midi_generate/test.py | 474 | Python |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
A1 = [1, 2, 4, 5, 6, 6, 8, 9]
A2 = [2, 5, 6, 7, 8, 8, 9]
def find_closest_num(A, target):
min_diff = float("inf")
low = 0
high = len(A) - 1
closest_num = None
# Edge cases for an empty list or a list
# with only one element:
if len(A) == 0:
return None
if len(A) == 1:
return A[0]
while low <= high:
mid = (low + high)//2
# Ensure you do not read beyond the bounds
# of the list.
if mid+1 < len(A):
min_diff_right = abs(A[mid + 1] - target)
if mid > 0:
min_diff_left = abs(A[mid - 1] - target)
# Check whether the absolute differences to the
# left and right neighbours are smaller than any
# seen so far.
if min_diff_left < min_diff:
min_diff = min_diff_left
closest_num = A[mid - 1]
if min_diff_right < min_diff:
min_diff = min_diff_right
closest_num = A[mid + 1]
# Move the mid-point appropriately as is done
# via binary search.
if A[mid] < target:
low = mid + 1
elif A[mid] > target:
high = mid - 1
# If the element itself is the target, the closest
# number to it is itself. Return the number.
else:
return A[mid]
return closest_num
print(find_closest_num(A1, 11))
print(find_closest_num(A2, 4))
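# Expected output (illustrative): 11 lies past the end of A1, so its closest
# value is the last element, 9; the value in A2 nearest to 4 is 5.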
# In[ ]:
| 21.776119 | 58 | 0.5305 | [
"MPL-2.0"
] | grvkmrpandit/data-structures-and-algorithms | dsa/closestnumber.py | 1,459 | Python |
from threading import Thread
from time import sleep
from pytezos import pytezos
import argparse
contract_dict = {}
def contract_origin_search(p, contract_hash, verbose = 0):
start = 0
end = p.shell.head.header()["level"]
contract = p.contract(contract_hash)
found = -1
data = None
while found == -1:
anchor = int((end+start)/2)
try:
data = contract.storage(anchor)
try:
data = contract.storage(anchor-1)
end=anchor
except Exception:
found = anchor
except Exception :
start = anchor
if verbose:
print("Ntf origin:", contract_hash, found, data, "\n")
return found, data
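# Illustrative note (not part of the original script): contract.storage(level)
# raises for levels before the contract existed, so the loop above bisects the
# [0, head] range until it finds the first level where storage(anchor) succeeds
# but storage(anchor - 1) raises -- i.e. the origination level.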
def contract_all_update_search(p, contract_hash, start=-1, end=-1):
results = []
head_level = p.shell.head.header()["level"]
contract = p.contract(contract_hash)
origin_level, data = contract_origin_search(p, contract_hash, verbose=1)
start = start
if origin_level > start or start ==-1:
start = origin_level
results.append([origin_level, data])
else:
data = contract.storage(start)
results.append([start, data])
end = end
if end > head_level or end ==-1:
end = head_level
for lvl in range(start+1, end+1):
if contract_hash not in contract_dict.keys():
break
data = contract.storage(lvl)
if data != results[len(results)-1][1]:
print("Ntf past", contract_hash, lvl, data, "\n")
results.append([lvl, data])
sleep(2) # TO REMOVE, added as test vector has too many updates
return start, end, results
def contract_first_update_search(p, contract_hash, start=-1):
head_level = p.shell.head.header()["level"]
contract = p.contract(contract_hash)
origin_level, data = contract_origin_search(p, contract_hash)
if start > head_level:
return -1, [-1, None]
start = start
if origin_level > start:
start = origin_level
for lvl in range(start+1, head_level+1):
new_data = contract.storage(lvl)
if new_data != data:
print("Ntf first:", contract_hash, start, lvl, new_data, "\n")
return start, [lvl, new_data]
return start, [-1, None]
def contract_last_update_search(p, contract_hash, end=-1):
head_level = p.shell.head.header()["level"]
contract = p.contract(contract_hash)
origin_level, data = contract_origin_search(p, contract_hash)
if end > 0 and end < origin_level:
return -1, [-1, None]
end = end
if end == -1 or end > head_level:
end = head_level
for lvl in range(end, origin_level, -1):
new_data = contract.storage(lvl)
prev_data = contract.storage(lvl-1)
if new_data != prev_data:
print("Ntf end:", contract_hash, end, lvl, new_data, "\n")
return end, [lvl, new_data]
return end, [-1, None]
def read_from_head(p):
global contract_dict
while len(contract_dict) != 0:
for contract_hash in contract_dict.keys():
head_level = p.shell.head.header()["level"]
data = p.contract(contract_hash).storage(head_level)
if data != contract_dict[contract_hash]["last_data"]:
print("Ntf head:", contract_hash, head_level, data, "\n")
contract_dict[contract_hash]["last_data"] = data
sleep(5) # TO REMOVE
def main():
global contract_dict
# Instantiate the parser
parser = argparse.ArgumentParser(description='Optional app description')
parser.add_argument('-c', '--contract', type=str, help="the hash of the contract to scan")
parser.add_argument("-net", "--network", type=str, help="the network, such as mainnet, carthagenet, dalphanet, delphinet or a RPC node uri", default="mainnet")
parser.add_argument("-org", "--origin", help="find the level when the contract was deployed", action="store_true")
parser.add_argument("-fst", "--first", help="find the contract's first update", action="store_true")
parser.add_argument("-lst", "--last", help="find the contract's last update", action="store_true")
parser.add_argument("-stt", "--start", type=int, help="index from where to start the scan", default=-1)
parser.add_argument("-hash", "--hash", type=int, help="block hash from where to scan")
parser.add_argument("-end", "--end", type=int, help="index until which to start the scan (from which for last update)", default=-1)
args = parser.parse_args()
contract_hash = args.contract
if args.contract is None:
print("Error: Specify contract hash", "\n")
return
# Set network and get head's level
network = args.network
p = pytezos.using(shell="mainnet")
head_level = -1
try:
p = pytezos.using(shell=network)
head_level = p.shell.head.header()["level"]
except Exception as e:
print("Error: Network error", e, "\n")
return
# Set the scan lower and upper bounds
start = args.start
if args.hash is not None:
try:
block = p.shell.chains.main.blocks[args.hash]
start = block.header()["level"]
except Exception as e:
print("Error: block not found", e, "\n")
return
end = args.end
# Check contract exists
ci = None
storage = None
try:
ci = p.contract(contract_hash)
storage = ci.storage(head_level)
except Exception as e:
print("Error: contract not found", e, "\n")
return
# Return first update's level if asked
if args.first == True:
Thread(target=contract_first_update_search, args=(p, contract_hash,), kwargs={"start":start}).start()
# Return last update's level if asked
if args.last == True:
thread = Thread(target=contract_last_update_search, args=(p, contract_hash,), kwargs={"end":end}).start()
# Return origination's level if asked
if args.origin == True:
thread = Thread(target=contract_origin_search, args=(p, contract_hash,), kwargs={"verbose":1}).start()
# Return all updates' levels if asked
if (args.first == False and args.last == False and args.origin == False):
if contract_hash not in contract_dict.keys():
end2 = head_level
if end <= head_level:
end2 = end
Thread(target=contract_all_update_search, args=(p, contract_hash,), kwargs={"start":start, "end":end2}).start()
if end == -1 or end > head_level:
contract_dict[contract_hash]={"last_data":storage}
Thread(target=read_from_head, args=(p,)).start()
else:
print("Error: contract already being scanned.", "\n")
# Start loop to enter or remove notification requests
while len(contract_dict) != 0:
try:
# Send hint and listen to input
inputs = input("\n\nFunctions:\n add <hash> --start <start> --end <end>\n remove <hash>\n origin <hash> \n first <hash> --start <start>\n last <hash> --end <end>\n list\n\n").strip()
inputs = inputs.split(" ")
# Parse input and look for function
if inputs[0].lower() in ["add", "remove", "origin", "first", "last", "list"]:
if inputs[0].lower() == "list":
for key in contract_dict.keys():
print(key)
print("\n")
else:
try:
contract_hash = inputs[1]
storage = p.contract(contract_hash).storage()
originated_level, originated_data = contract_origin_search(p, contract_hash)
head_level = p.shell.head.header()["level"]
# Check scan lower bound
start = -1
if "--start" in inputs:
stt = int(inputs[inputs.index("--start")+1])
start = stt
if stt < originated_level:
start = originated_level
# Check scan upper bound
end = -1
if "--end" in inputs:
end = int(inputs[inputs.index("--end")+1])
# Return first update's level if asked
if inputs[0] == "first":
Thread(target=contract_first_update_search, args=(p, contract_hash,), kwargs={"start":start}).start()
# Return last update's level if asked
if inputs[0] == "last":
Thread(target=contract_last_update_search, args=(p, contract_hash,), kwargs={"end":end}).start()
# Return origination's level if asked
if inputs[0] == "origin":
Thread(target=contract_origin_search, args=(p, contract_hash,), kwargs={"verbose":1}).start()
# Return all updates' levels if asked
if inputs[0] == "add":
end2 = head_level
if end <= head_level:
end2 = end
Thread(target=contract_all_update_search, args=(p, contract_hash,), kwargs={"start":start, "end":end2}).start()
if (end == -1 or end > head_level) and contract_hash not in contract_dict.keys():
contract_dict[contract_hash]={"last_data":storage}
if inputs[0] == "remove":
if contract_hash in contract_dict.keys():
del contract_dict[contract_hash]
print("Contract "+str(contract_hash)+" removed.\n")
except Exception as e:
print("Error: contract not found", e, "\n")
else:
print("Error command not recognized", inputs, "\n")
except Exception as e:
print(e)
print("No more contract to scan, closing program.\n")
def test_contract_origin():
contract = "KT19kgnqC5VWoxktLRdRUERbyUPku9YioE8W"
origin_lvl = 1073618
p = pytezos.using(shell="mainnet")
lvl, _ = contract_origin_search(p, contract)
assert origin_lvl == lvl
def test_contract_first_update():
contract = "KT19kgnqC5VWoxktLRdRUERbyUPku9YioE8W"
first_update_lvl = 1073622
p = pytezos.using(shell="mainnet")
start, [lvl, _] = contract_first_update_search(p, contract)
assert first_update_lvl == lvl
if __name__ == "__main__":
main()
| 40.416357 | 200 | 0.565121 | [
"MIT"
] | boltlabs-inc/libzkchannels | tezos-sandbox/watchtower/delphinet/passive_watchtower.py | 10,872 | Python |
from functools import partial
from dictknife.langhelpers import as_jsonpointer as _as_jsonpointer
from dictknife.langhelpers import as_path_node as _as_path_node
from dictknife import accessing
from dictknife import naming
def _make_key(k0, k1, *, sep="/"):
if k1 is None:
return _as_jsonpointer(str(k0))
return "{}{}{}".format(_as_jsonpointer(str(k0)), sep, k1)
def unflatten(d, *, sep="/", accessor=accessing.Accessor()):
r = accessor.make_dict()
for k, v in d.items():
accessor.assign(r, [_as_path_node(x) for x in k.split(sep)], v)
return _fix_unflatten_list(r)
def _fix_unflatten_list(d):
if hasattr(d, "keys"):
for k in list(d.keys()):
d[k] = _fix_unflatten_list(d[k])
# list ?
if "0" in d and str(len(d) - 1) in d:
r = []
for i in range(len(d)):
k = str(i)
if k not in d:
return d
r.append(d[k])
return r
return d
def flatten(d, *, sep="/"):
if isinstance(d, (list, tuple)):
return {
_make_key(i, k, sep=sep): v
for i, row in enumerate(d)
for k, v in flatten(row, sep=sep).items()
}
elif hasattr(d, "get"):
return {
_make_key(k, k2, sep=sep): v2
for k, v in d.items()
for k2, v2 in flatten(v, sep=sep).items()
}
elif hasattr(d, "__next__"):
# todo: as generator
return flatten(list(d), sep=sep)
else:
# todo: performance improvement
return {None: _as_jsonpointer(d) if hasattr(d, "replace") else d}
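# Illustrative example (assumption, not from the original module): flatten() and
# unflatten() act as inverses for JSON-like data, e.g.
#   flatten({"a": {"b": [10, 20]}})        -> {"a/b/0": 10, "a/b/1": 20}
#   unflatten({"a/b/0": 10, "a/b/1": 20})  -> {"a": {"b": [10, 20]}}
# list indices become "0", "1", ... path segments and are rebuilt into lists by
# _fix_unflatten_list on the way back.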
def rows(d, *, kname="name", vname="value"):
return [{kname: k, vname: v} for k, v in d.items()]
def update_keys(d, *, key, coerce=str): # side effect!
if hasattr(d, "keys"):
for k, v in list(d.items()):
d[key(coerce(k))] = d.pop(k)
update_keys(v, key=key, coerce=coerce)
elif isinstance(d, (list, tuple)):
for x in d:
update_keys(x, key=key, coerce=coerce)
return d
str_dict = partial(update_keys, key=str)
normalize_dict = partial(update_keys, key=naming.normalize)
snakecase_dict = partial(update_keys, key=naming.snakecase)
camelcase_dict = partial(update_keys, key=naming.camelcase)
kebabcase_dict = partial(update_keys, key=naming.kebabcase)
pascalcase_dict = partial(update_keys, key=naming.pascalcase)
def only_num(d):
return {
k: v
for k, v in d.items()
if (isinstance(v, (int, float)) and not isinstance(v, bool))
or (hasattr(v, "isdigit") and v.isdigit())
}
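# Illustrative example (assumption, not from the original tests):
#   only_num({"a": 1, "b": True, "c": "2", "d": "x"}) -> {"a": 1, "c": "2"}
# booleans are excluded explicitly, and digit-only strings are kept.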
def only_str(d):
return {k: v for k, v in d.items() if isinstance(v, str)}
def shrink(
d,
*,
max_length_of_string: int = 100,
cont_suffix: str = "...",
max_length_of_list: int = 3,
with_tail: bool = False,
mutable: bool = False,
):
# todo: random select
# todo: cont suffix for list
from dictknife.accessing import get_modifier
modifier = get_modifier(mutable=mutable)
def _map(d):
if isinstance(d, (list, tuple)):
xs = d
if len(xs) > max_length_of_list:
xs = d[:max_length_of_list]
if with_tail:
xs.extend(d[-max_length_of_list:])
return modifier.modify_list(_map, xs)
elif hasattr(d, "keys"):
return modifier.modify_dict(_map, d)
elif isinstance(d, str):
s = d
if len(s) > max_length_of_string:
s = s[:max_length_of_string] + cont_suffix
return s
else:
return d
return _map(d)
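# Illustrative examples (assumption, not from the original tests):
#   shrink(list(range(10)), max_length_of_list=3)                 -> [0, 1, 2]
#   shrink(list(range(10)), max_length_of_list=3, with_tail=True) -> [0, 1, 2, 7, 8, 9]
#   shrink("x" * 200, max_length_of_string=5)                     -> "xxxxx..."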
| 28.418605 | 73 | 0.575832 | [
"MIT"
] | podhmo/dictknife | dictknife/transform.py | 3,666 | Python |
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from distutils.version import StrictVersion
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton)
from electrum_dash import version
from electrum_dash import constants
from electrum_dash import ecc
from electrum_dash.i18n import _
from electrum_dash.util import make_aiohttp_session
from electrum_dash.logging import Logger
class UpdateCheck(QWidget, Logger):
url = "https://raw.githubusercontent.com/akhavr/electrum-pac/master/.latest-version"
download_url = "https://github.com/PACGlobalOfficial/electrum-pac/releases"
VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
"XuKFPN7RDbrvNsPddPyUPzVqwdhvfB67cx",
)
def __init__(self, main_window, latest_version=None):
self.main_window = main_window
QWidget.__init__(self)
self.setWindowTitle('PacGlobal Electrum - ' + _('Update Check'))
self.content = QVBoxLayout()
self.content.setContentsMargins(*[10]*4)
self.heading_label = QLabel()
self.content.addWidget(self.heading_label)
self.detail_label = QLabel()
self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
self.detail_label.setOpenExternalLinks(True)
self.content.addWidget(self.detail_label)
self.pb = QProgressBar()
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.content.addWidget(self.pb)
versions = QHBoxLayout()
versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
versions.addWidget(self.latest_version_label)
self.content.addLayout(versions)
self.update_view(latest_version)
self.update_check_thread = UpdateCheckThread(self.main_window)
self.update_check_thread.checked.connect(self.on_version_retrieved)
self.update_check_thread.failed.connect(self.on_retrieval_failed)
self.update_check_thread.start()
close_button = QPushButton(_("Close"))
close_button.clicked.connect(self.close)
self.content.addWidget(close_button)
self.setLayout(self.content)
self.show()
def on_version_retrieved(self, version):
self.update_view(version)
def on_retrieval_failed(self):
self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
self.pb.hide()
@staticmethod
def is_newer(latest_version):
v = version.ELECTRUM_VERSION
if 'rc' in v:
v = v[:v.index('rc')]
return latest_version > StrictVersion(v)
def update_view(self, latest_version=None):
if latest_version:
self.pb.hide()
self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
if self.is_newer(latest_version):
self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
self.detail_label.setText(_("You can download the new version from {}.").format(url))
else:
self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
self.detail_label.setText(_("You are already on the latest version of PacGlobal Electrum."))
else:
self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
self.detail_label.setText(_("Please wait while PacGlobal Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
checked = pyqtSignal(object)
failed = pyqtSignal()
def __init__(self, main_window):
QThread.__init__(self)
Logger.__init__(self)
self.main_window = main_window
async def get_update_info(self):
async with make_aiohttp_session(proxy=self.main_window.network.proxy) as session:
async with session.get(UpdateCheck.url) as result:
signed_version_dict = await result.json(content_type=None)
# example signed_version_dict:
# {
# "version": "3.9.9",
# "signatures": {
# "1Lqm1HphuhxKZQEawzPse8gJtgjm9kUKT4": "IA+2QG3xPRn4HAIFdpu9eeaCYC7S5wS/sDxn54LJx6BdUTBpse3ibtfq8C43M7M1VfpGkD5tsdwl5C6IfpZD/gQ="
# }
# }
version_num = signed_version_dict['version']
sigs = signed_version_dict['signatures']
for address, sig in sigs.items():
if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
continue
sig = base64.b64decode(sig)
msg = version_num.encode('utf-8')
if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
net=constants.BitcoinMainnet):
self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
break
else:
raise Exception('no valid signature for version announcement')
return StrictVersion(version_num.strip())
def run(self):
network = self.main_window.network
if not network:
self.failed.emit()
return
try:
update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), network.asyncio_loop).result()
except Exception as e:
self.logger.info(f"got exception: '{repr(e)}'")
self.failed.emit()
else:
self.checked.emit(update_info)
| 41.897959 | 154 | 0.639877 | [
"MIT"
] | PACGlobalOfficial/electrum-pac | electrum_dash/gui/qt/update_checker.py | 6,159 | Python |
import asyncio
import random
from async_pipeline.stage import PipelineStage, pipeline_operation
class Loader(PipelineStage):
def __init__(self, conf, *args, **kwargs) -> None:
self._operation = conf["load"]
super().__init__(*args, **kwargs)
@pipeline_operation
async def print(self, message):
print(f"[FINAL OUT]: {message}")
await asyncio.sleep(random.randint(1, 5)) # simulated IO delay
| 27.375 | 71 | 0.680365 | [
"MIT"
] | zar3bski/async_pipeline_experiment | async_pipeline/loader.py | 438 | Python |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests that application of gates and state preparations
works correctly on a device.
"""
# pylint: disable=no-self-use
# pylint: disable=too-many-arguments
# pylint: disable=pointless-statement
from cmath import exp
from math import cos, sin, sqrt
import pytest
import numpy as np
import pennylane as qml
from scipy.linalg import block_diag
from flaky import flaky
pytestmark = pytest.mark.skip_unsupported
np.random.seed(42)
# ==========================================================
# Some useful global variables
# gates for which device support is tested
ops = {
"BasisState": qml.BasisState(np.array([0]), wires=[0]),
"CNOT": qml.CNOT(wires=[0, 1]),
"CRX": qml.CRX(0, wires=[0, 1]),
"CRY": qml.CRY(0, wires=[0, 1]),
"CRZ": qml.CRZ(0, wires=[0, 1]),
"CRot": qml.CRot(0, 0, 0, wires=[0, 1]),
"CSWAP": qml.CSWAP(wires=[0, 1, 2]),
"CZ": qml.CZ(wires=[0, 1]),
"CY": qml.CY(wires=[0, 1]),
"DiagonalQubitUnitary": qml.DiagonalQubitUnitary(np.array([1, 1]), wires=[0]),
"Hadamard": qml.Hadamard(wires=[0]),
"MultiRZ": qml.MultiRZ(0, wires=[0]),
"PauliX": qml.PauliX(wires=[0]),
"PauliY": qml.PauliY(wires=[0]),
"PauliZ": qml.PauliZ(wires=[0]),
"PhaseShift": qml.PhaseShift(0, wires=[0]),
"ControlledPhaseShift": qml.ControlledPhaseShift(0, wires=[0, 1]),
"QubitStateVector": qml.QubitStateVector(np.array([1.0, 0.0]), wires=[0]),
"QubitUnitary": qml.QubitUnitary(np.eye(2), wires=[0]),
"ControlledQubitUnitary": qml.ControlledQubitUnitary(np.eye(2), control_wires=[1], wires=[0]),
"MultiControlledX": qml.MultiControlledX(control_wires=[1, 2], wires=[0]),
"RX": qml.RX(0, wires=[0]),
"RY": qml.RY(0, wires=[0]),
"RZ": qml.RZ(0, wires=[0]),
"Rot": qml.Rot(0, 0, 0, wires=[0]),
"S": qml.S(wires=[0]),
"SWAP": qml.SWAP(wires=[0, 1]),
"ISWAP": qml.ISWAP(wires=[0, 1]),
"T": qml.T(wires=[0]),
"SX": qml.SX(wires=[0]),
"Toffoli": qml.Toffoli(wires=[0, 1, 2]),
"QFT": qml.QFT(wires=[0, 1, 2]),
"IsingXX": qml.IsingXX(0, wires=[0, 1]),
"IsingZZ": qml.IsingZZ(0, wires=[0, 1]),
"SingleExcitation": qml.SingleExcitation(0, wires=[0, 1]),
"SingleExcitationPlus": qml.SingleExcitationPlus(0, wires=[0, 1]),
"SingleExcitationMinus": qml.SingleExcitationMinus(0, wires=[0, 1]),
"DoubleExcitation": qml.DoubleExcitation(0, wires=[0, 1, 2, 3]),
"DoubleExcitationPlus": qml.DoubleExcitationPlus(0, wires=[0, 1, 2, 3]),
"DoubleExcitationMinus": qml.DoubleExcitationMinus(0, wires=[0, 1, 2, 3]),
"QubitCarry": qml.QubitCarry(wires=[0, 1, 2, 3]),
"QubitSum:": qml.QubitSum(wires=[0, 1, 2]),
}
all_ops = ops.keys()
# non-parametrized qubit gates
I = np.identity(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.array([[1, 1], [1, -1]]) / sqrt(2)
S = np.diag([1, 1j])
T = np.diag([1, np.exp(1j * np.pi / 4)])
SX = 0.5 * np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
ISWAP = np.array([[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CZ = np.diag([1, 1, 1, -1])
CY = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, 1j, 0]])
toffoli = np.diag([1 for i in range(8)])
toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
CSWAP = block_diag(I, I, SWAP)
# parametrized qubit gates
phase_shift = lambda phi: np.array([[1, 0], [0, np.exp(1j * phi)]])
rx = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * X
ry = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Y
rz = lambda theta: cos(theta / 2) * I + 1j * sin(-theta / 2) * Z
rot = lambda a, b, c: rz(c) @ (ry(b) @ rz(a))
crz = lambda theta: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, np.exp(-1j * theta / 2), 0],
[0, 0, 0, np.exp(1j * theta / 2)],
]
)
cry = lambda theta: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, cos(theta / 2), -sin(theta / 2)],
[0, 0, sin(theta / 2), cos(theta / 2)],
]
)
crx = lambda theta: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, cos(theta / 2), 1j * sin(-theta / 2)],
[0, 0, 1j * sin(-theta / 2), cos(theta / 2)],
]
)
crot = lambda phi, theta, omega: np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[
0,
0,
exp(-0.5j * (phi + omega)) * cos(theta / 2),
-exp(0.5j * (phi - omega)) * sin(theta / 2),
],
[
0,
0,
exp(-0.5j * (phi - omega)) * sin(theta / 2),
exp(0.5j * (phi + omega)) * cos(theta / 2),
],
]
)
IsingXX = lambda phi: np.array(
[
[cos(phi / 2), 0, 0, -1j * sin(phi / 2)],
[0, cos(phi / 2), -1j * sin(phi / 2), 0],
[0, -1j * sin(phi / 2), cos(phi / 2), 0],
[-1j * sin(phi / 2), 0, 0, cos(phi / 2)],
]
)
IsingZZ = lambda phi: np.array(
[
[exp(-1.0j * phi / 2), 0, 0, 0],
[0, exp(1.0j * phi / 2), 0, 0],
[0, 0, exp(1.0j * phi / 2), 0],
[0, 0, 0, exp(-1.0j * phi / 2)],
]
)
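# Quick sanity checks on the matrices above (illustrative, not part of the
# original test suite): rx(np.pi) is -1j * X and rz(np.pi) is -1j * Z (up to
# floating-point error), and rot(a, b, c) == rz(c) @ ry(b) @ rz(a) by
# construction.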
# list of all non-parametrized single-qubit gates,
# along with the PennyLane operation name
single_qubit = [
(qml.PauliX, X),
(qml.PauliY, Y),
(qml.PauliZ, Z),
(qml.Hadamard, H),
(qml.S, S),
(qml.T, T),
(qml.SX, SX),
]
# list of all parametrized single-qubit gates
# taking a single parameter
single_qubit_param = [
(qml.PhaseShift, phase_shift),
(qml.RX, rx),
(qml.RY, ry),
(qml.RZ, rz),
]
# list of all non-parametrized two-qubit gates
two_qubit = [(qml.CNOT, CNOT), (qml.SWAP, SWAP), (qml.ISWAP, ISWAP), (qml.CZ, CZ), (qml.CY, CY)]
# list of all parametrized two-qubit gates
two_qubit_param = [
(qml.CRX, crx),
(qml.CRY, cry),
(qml.CRZ, crz),
(qml.IsingXX, IsingXX),
(qml.IsingZZ, IsingZZ),
]
two_qubit_multi_param = [(qml.CRot, crot)]
# list of all three-qubit gates
three_qubit = [(qml.Toffoli, toffoli), (qml.CSWAP, CSWAP)]
# single qubit unitary matrix
theta = 0.8364
phi = -0.1234
U = np.array(
[
[
np.cos(theta / 2) * np.exp(complex(0, -phi / 2)),
-np.sin(theta / 2) * np.exp(complex(0, phi / 2)),
],
[
np.sin(theta / 2) * np.exp(complex(0, -phi / 2)),
np.cos(theta / 2) * np.exp(complex(0, phi / 2)),
],
]
)
# two qubit unitary matrix
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / sqrt(3)
# single qubit Hermitian observable
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
# ===============================================================
class TestSupportedGates:
"""Test that the device can implement all gates that it claims to support."""
@pytest.mark.parametrize("operation", all_ops)
def test_supported_gates_can_be_implemented(self, device_kwargs, operation):
"""Test that the device can implement all its supported gates."""
device_kwargs["wires"] = 4 # maximum size of current gates
dev = qml.device(**device_kwargs)
assert hasattr(dev, "operations")
if operation in dev.operations:
@qml.qnode(dev)
def circuit():
ops[operation]
return qml.expval(qml.Identity(wires=0))
assert isinstance(circuit(), (float, np.ndarray))
@pytest.mark.parametrize("operation", all_ops)
def test_inverse_gates_can_be_implemented(self, device_kwargs, operation):
"""Test that the device can implement the inverse of all its supported gates.
This test is skipped for devices that do not support inverse operations."""
device_kwargs["wires"] = 4
dev = qml.device(**device_kwargs)
supports_inv = (
"supports_inverse_operations" in dev.capabilities()
and dev.capabilities()["supports_inverse_operations"]
)
if not supports_inv:
pytest.skip("Device does not support inverse operations.")
assert hasattr(dev, "operations")
if operation in dev.operations:
@qml.qnode(dev)
def circuit():
ops[operation].queue().inv()
return qml.expval(qml.Identity(wires=0))
assert isinstance(circuit(), (float, np.ndarray))
@flaky(max_runs=10)
class TestGatesQubit:
"""Test qubit-based devices' probability vector after application of gates."""
@pytest.mark.parametrize(
"basis_state",
[
np.array([0, 0, 1, 0]),
np.array([0, 0, 1, 0]),
np.array([1, 0, 1, 0]),
np.array([1, 1, 1, 1]),
],
)
def test_basis_state(self, device, basis_state, tol, skip_if):
"""Test basis state initialization."""
n_wires = 4
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
@qml.qnode(dev)
def circuit():
qml.BasisState(basis_state, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.zeros([2 ** n_wires])
expected[np.ravel_multi_index(basis_state, [2] * n_wires)] = 1
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_qubit_state_vector(self, device, init_state, tol, skip_if):
"""Test QubitStateVector initialisation."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
return qml.probs(range(n_wires))
res = circuit()
expected = np.abs(rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("op,mat", single_qubit)
def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test PauliX application."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("gamma", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
"""Test single qubit gates taking a single scalar argument."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(func(gamma) @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_rotation(self, device, init_state, tol, skip_if):
"""Test three axis rotation gate."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
a = 0.542
b = 1.3432
c = -0.654
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
qml.Rot(a, b, c, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(rot(a, b, c) @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("op,mat", two_qubit)
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test two qubit gates."""
n_wires = 2
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("param", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, param, tol, skip_if):
"""Test parametrized two qubit gates taking a single scalar argument."""
n_wires = 2
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(param, wires=range(n_wires))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(func(param) @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("mat", [U, U2])
def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
"""Test QubitUnitary gate."""
n_wires = int(np.log2(len(mat)))
dev = device(n_wires)
if "QubitUnitary" not in dev.operations:
pytest.skip("Skipped because device does not support QubitUnitary.")
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
qml.QubitUnitary(mat, wires=list(range(n_wires)))
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("op, mat", three_qubit)
def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test three qubit gates without parameters."""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=[0, 1, 2])
return qml.probs(wires=range(n_wires))
res = circuit()
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@flaky(max_runs=10)
class TestInverseGatesQubit:
"""Test the device's probability vector after application of inverse of gates."""
@pytest.mark.parametrize("op,mat", single_qubit)
def test_single_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test inverse single qubit gate application."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(1)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("gamma", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", single_qubit_param)
def test_single_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
"""Test inverse single qubit gates taking one scalar parameter."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = func(gamma)
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
def test_rotation(self, device, init_state, tol, skip_if):
"""Test inverse three axis rotation gate."""
n_wires = 1
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
qml.Rot(a, b, c, wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = rot(a, b, c)
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("op,mat", two_qubit)
def test_two_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test inverse two qubit gates."""
n_wires = 2
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("gamma", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", two_qubit_param)
def test_two_qubit_parameters(self, device, init_state, op, func, gamma, tol, skip_if):
"""Test inverse of two qubit gates taking one parameter."""
n_wires = 2
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(2)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(gamma, wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = func(gamma)
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("mat", [U, U2])
def test_qubit_unitary(self, device, init_state, mat, tol, skip_if):
"""Test inverse QubitUnitary gate."""
n_wires = int(np.log2(len(mat)))
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(n_wires)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
qml.QubitUnitary(mat, wires=list(range(n_wires))).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
@pytest.mark.parametrize("op, mat", three_qubit)
def test_three_qubit_no_parameters(self, device, init_state, op, mat, tol, skip_if):
"""Test inverse three qubit gates without parameters."""
n_wires = 3
dev = device(n_wires)
skip_if(dev, {"supports_inverse_operations": False})
skip_if(dev, {"returns_probs": False})
rnd_state = init_state(3)
@qml.qnode(dev)
def circuit():
qml.QubitStateVector(rnd_state, wires=range(n_wires))
op(wires=range(n_wires)).inv()
return qml.probs(wires=range(n_wires))
res = circuit()
mat = mat.conj().T
expected = np.abs(mat @ rnd_state) ** 2
assert np.allclose(res, expected, atol=tol(dev.shots))
| 33.576378 | 98 | 0.582853 | [
"Apache-2.0"
] | AlaricCheng/pennylane | pennylane/devices/tests/test_gates.py | 21,321 | Python |
# Generated by Django 3.0.3 on 2020-08-03 15:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ecommerce_platform', '0010_userprofile_address'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='address',
),
]
| 20.166667 | 60 | 0.584022 | [
"MIT"
] | kapkan7/Ecommerce-Website | obsidian_traders/ecommerce_platform/migrations/0011_remove_userprofile_address.py | 363 | Python |
import pandas as pd
import numpy as np
def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]):
    """ Correlate each input feature with the output raised to a range of exponents
>>> np.random.seed(314159)
>>> df = pd.DataFrame()
>>> df['output'] = np.random.randn(1000)
>>> df['x10'] = df.output * 10
>>> df['sq'] = df.output ** 2
>>> df['sqrt'] = df.output ** .5
>>> optimize_feature_power(df, output_column_name='output').round(2)
x10 sq sqrt
power
2.00 -0.08 1.00 0.83
1.00 1.00 -0.08 0.97
0.80 1.00 0.90 0.99
0.50 0.97 0.83 1.00
0.25 0.93 0.76 0.99
0.10 0.89 0.71 0.97
0.01 0.86 0.67 0.95
Returns:
DataFrame:
columns are the input_columns from the source dataframe (df)
            rows are the correlations with output ** power, one row per attempted exponent
"""
output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name
input_column_names = [colname for colname in df.columns if output_column_name != colname]
results = np.zeros((len(exponents), len(input_column_names)))
for rownum, exponent in enumerate(exponents):
for colnum, column_name in enumerate(input_column_names):
results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name])
results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power'))
# results.plot(logx=True)
return results
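# Usage sketch (added for illustration; not part of the original module). It mirrors the
# commented-out `results.plot(logx=True)` hint above and assumes matplotlib is installed;
# the DataFrame below is made up purely for the demo.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    np.random.seed(0)
    demo = pd.DataFrame({'x10': np.random.randn(200)})
    demo['output'] = demo.x10 / 10.
    powers = optimize_feature_power(demo, output_column_name='output')
    print(powers.round(2))
    powers.plot(logx=True)  # one line per input column: correlation vs. exponent
    plt.xlabel('exponent applied to output')
    plt.ylabel('correlation with input column')
    plt.show()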
| 40.769231 | 105 | 0.647799 | [
"MIT"
] | AAAI-DISIM-UnivAQ/nlpia | src/nlpia/features.py | 1,590 | Python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regression task.
Find commit ranges where regressions were introduced."""
from builtins import range
import random
import time
from base import errors
from base import tasks
from bot import testcase_manager
from bot.tasks import setup
from bot.tasks import task_creation
from build_management import build_manager
from build_management import revisions
from datastore import data_handler
from datastore import data_types
from google_cloud_utils import big_query
from metrics import logs
from system import environment
# Number of revisions before the maximum to test before doing a bisect. This
# is also used as a cap for revisions to test near the minimum if the minimum
# happens to be a bad build.
EXTREME_REVISIONS_TO_TEST = 3
# Number of earlier revisions to check when validating ranges.
REVISIONS_TO_TEST_FOR_VALIDATION = 2
# Maximum revisions to look back when validating.
EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION = 10
def write_to_big_query(testcase, regression_range_start, regression_range_end):
"""Write the regression range to BigQuery."""
big_query.write_range(
table_id='regressions',
testcase=testcase,
range_name='regression',
start=regression_range_start,
end=regression_range_end)
def _save_current_regression_range_indices(testcase_id, regression_range_start,
regression_range_end):
"""Save current regression range indices in case we die in middle of task."""
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.set_metadata(
'last_regression_min', regression_range_start, update_testcase=False)
testcase.set_metadata(
'last_regression_max', regression_range_end, update_testcase=False)
testcase.put()
def save_regression_range(testcase_id, regression_range_start,
regression_range_end):
"""Saves the regression range and creates blame and impact task if needed."""
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = '%d:%d' % (regression_range_start, regression_range_end)
data_handler.update_testcase_comment(
testcase, data_types.TaskState.FINISHED,
'regressed in range %s' % testcase.regression)
write_to_big_query(testcase, regression_range_start, regression_range_end)
# Force impacts update after regression range is updated. In several cases,
# we might not have a production build to test with, so regression range is
# used to decide impacts.
task_creation.create_impact_task_if_needed(testcase)
# Get blame information using the regression range result.
task_creation.create_blame_task_if_needed(testcase)
# If there is a fine grained bisection service available, request it.
task_creation.request_bisection(testcase, 'regressed')
def _testcase_reproduces_in_revision(testcase,
testcase_file_path,
job_type,
revision,
should_log=True,
min_revision=None,
max_revision=None):
"""Test to see if a test case reproduces in the specified revision."""
if should_log:
log_message = 'Testing r%d' % revision
if min_revision is not None and max_revision is not None:
log_message += ' (current range %d:%d)' % (min_revision, max_revision)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
log_message)
build_manager.setup_build(revision)
if not build_manager.check_app_path():
raise errors.BuildSetupError(revision, job_type)
if testcase_manager.check_for_bad_build(job_type, revision):
log_message = 'Bad build at r%d. Skipping' % revision
testcase = data_handler.get_testcase_by_id(testcase.key.id())
data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
log_message)
raise errors.BadBuildError(revision, job_type)
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
result = testcase_manager.test_for_crash_with_retries(
testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
return result.is_crash()
def found_regression_near_extreme_revisions(testcase, testcase_file_path,
job_type, revision_list, min_index,
max_index):
"""Test to see if we regressed near either the min or max revision."""
# Test a few of the most recent revisions.
last_known_crashing_revision = revision_list[max_index]
for offset in range(1, EXTREME_REVISIONS_TO_TEST + 1):
current_index = max_index - offset
if current_index < min_index:
break
# If we don't crash in a recent revision, we regressed in one of the
# commits between the current revision and the one at the next index.
try:
is_crash = _testcase_reproduces_in_revision(
testcase, testcase_file_path, job_type, revision_list[current_index])
except errors.BadBuildError:
# Skip this revision.
continue
if not is_crash:
save_regression_range(testcase.key.id(), revision_list[current_index],
last_known_crashing_revision)
return True
last_known_crashing_revision = revision_list[current_index]
# Test to see if we crash in the oldest revision we can run. This is a pre-
# condition for our binary search. If we do crash in that revision, it
# implies that we regressed between the first commit and our first revision,
# which we represent as 0:|min_revision|.
for _ in range(EXTREME_REVISIONS_TO_TEST):
min_revision = revision_list[min_index]
try:
crashes_in_min_revision = _testcase_reproduces_in_revision(
testcase,
testcase_file_path,
job_type,
min_revision,
should_log=False)
except errors.BadBuildError:
# If we find a bad build, potentially try another.
if min_index + 1 >= max_index:
break
min_index += 1
continue
if crashes_in_min_revision:
save_regression_range(testcase.key.id(), 0, min_revision)
return True
return False
# We should have returned above. If we get here, it means we tried too many
# builds near the min revision, and they were all bad.
raise errors.BadBuildError(revision_list[min_index], job_type)
def validate_regression_range(testcase, testcase_file_path, job_type,
revision_list, min_index):
"""Ensure that we found the correct min revision by testing earlier ones."""
earlier_revisions = revision_list[
min_index - EARLIER_REVISIONS_TO_CONSIDER_FOR_VALIDATION:min_index]
revision_count = min(len(earlier_revisions), REVISIONS_TO_TEST_FOR_VALIDATION)
revisions_to_test = random.sample(earlier_revisions, revision_count)
for revision in revisions_to_test:
try:
if _testcase_reproduces_in_revision(testcase, testcase_file_path,
job_type, revision):
testcase = data_handler.get_testcase_by_id(testcase.key.id())
testcase.regression = 'NA'
error_message = (
'Low confidence in regression range. Test case crashes in '
'revision r%d but not later revision r%d' %
(revision, revision_list[min_index]))
data_handler.update_testcase_comment(
testcase, data_types.TaskState.ERROR, error_message)
return False
except errors.BadBuildError:
pass
return True
def find_regression_range(testcase_id, job_type):
"""Attempt to find when the testcase regressed."""
deadline = tasks.get_task_completion_deadline()
testcase = data_handler.get_testcase_by_id(testcase_id)
if not testcase:
return
if testcase.regression:
logs.log_error(
'Regression range is already set as %s, skip.' % testcase.regression)
return
# This task is not applicable for custom binaries.
if build_manager.is_custom_binary():
testcase.regression = 'NA'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
'Not applicable for custom binaries')
return
data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
# Setup testcase and its dependencies.
file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
if not file_list:
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
'Failed to setup testcase')
tasks.add_task('regression', testcase_id, job_type)
return
build_bucket_path = build_manager.get_primary_bucket_path()
revision_list = build_manager.get_revisions_list(
build_bucket_path, testcase=testcase)
if not revision_list:
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
'Failed to fetch revision list')
tasks.add_task('regression', testcase_id, job_type)
return
# Don't burden NFS server with caching these random builds.
environment.set_value('CACHE_STORE', False)
# Pick up where left off in a previous run if necessary.
min_revision = testcase.get_metadata('last_regression_min')
max_revision = testcase.get_metadata('last_regression_max')
first_run = not min_revision and not max_revision
if not min_revision:
min_revision = revisions.get_first_revision_in_list(revision_list)
if not max_revision:
max_revision = testcase.crash_revision
min_index = revisions.find_min_revision_index(revision_list, min_revision)
if min_index is None:
raise errors.BuildNotFoundError(min_revision, job_type)
max_index = revisions.find_max_revision_index(revision_list, max_revision)
if max_index is None:
raise errors.BuildNotFoundError(max_revision, job_type)
# Make sure that the revision where we noticed the crash, still crashes at
# that revision. Otherwise, our binary search algorithm won't work correctly.
max_revision = revision_list[max_index]
crashes_in_max_revision = _testcase_reproduces_in_revision(
testcase, testcase_file_path, job_type, max_revision, should_log=False)
if not crashes_in_max_revision:
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = ('Known crash revision %d did not crash' % max_revision)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
error_message)
task_creation.mark_unreproducible_if_flaky(testcase, True)
return
# If we've made it this far, the test case appears to be reproducible. Clear
# metadata from previous runs had it been marked as potentially flaky.
task_creation.mark_unreproducible_if_flaky(testcase, False)
# On the first run, check to see if we regressed near either the min or max
# revision.
if first_run and found_regression_near_extreme_revisions(
testcase, testcase_file_path, job_type, revision_list, min_index,
max_index):
return
while time.time() < deadline:
min_revision = revision_list[min_index]
max_revision = revision_list[max_index]
# If the min and max revisions are one apart (or the same, if we only have
# one build), this is as much as we can narrow the range.
if max_index - min_index <= 1:
# Verify that the regression range seems correct, and save it if so.
if not validate_regression_range(testcase, testcase_file_path, job_type,
revision_list, min_index):
return
save_regression_range(testcase_id, min_revision, max_revision)
return
middle_index = (min_index + max_index) // 2
middle_revision = revision_list[middle_index]
try:
is_crash = _testcase_reproduces_in_revision(
testcase,
testcase_file_path,
job_type,
middle_revision,
min_revision=min_revision,
max_revision=max_revision)
except errors.BadBuildError:
# Skip this revision.
del revision_list[middle_index]
max_index -= 1
continue
if is_crash:
max_index = middle_index
else:
min_index = middle_index
_save_current_regression_range_indices(
testcase_id, revision_list[min_index], revision_list[max_index])
# If we've broken out of the above loop, we timed out. We'll finish by
# running another regression task and picking up from this point.
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = 'Timed out, current range r%d:r%d' % (
revision_list[min_index], revision_list[max_index])
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
error_message)
tasks.add_task('regression', testcase_id, job_type)
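# Illustrative sketch (not part of ClusterFuzz): the while-loop above is a plain bisection
# over the revision list. Stripped of build setup, bad-build handling, deadlines and
# persistence, the core idea reduces to the helper below, where `crashes` is a hypothetical
# predicate that returns True when the testcase reproduces at a given revision.
def _bisect_regression_sketch(revision_list, crashes):
  """Return (last_good_revision, first_bad_revision), assuming crashes() is monotonic
  and the endpoints of revision_list already bracket the regression."""
  min_index, max_index = 0, len(revision_list) - 1
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    if crashes(revision_list[middle_index]):
      max_index = middle_index  # Crash reproduces, so the regression is at or before middle.
    else:
      min_index = middle_index  # No crash yet, so the regression is after middle.
  return revision_list[min_index], revision_list[max_index]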
def execute_task(testcase_id, job_type):
"""Run regression task and handle potential errors."""
try:
find_regression_range(testcase_id, job_type)
except errors.BuildSetupError as error:
# If we failed to setup a build, it is likely a bot error. We can retry
# the task in this case.
testcase = data_handler.get_testcase_by_id(testcase_id)
error_message = 'Build setup failed r%d' % error.revision
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
error_message)
build_fail_wait = environment.get_value('FAIL_WAIT')
tasks.add_task(
'regression', testcase_id, job_type, wait_time=build_fail_wait)
except errors.BadBuildError:
# Though bad builds when narrowing the range are recoverable, certain builds
# being marked as bad may be unrecoverable. Recoverable ones should not
# reach this point.
testcase = data_handler.get_testcase_by_id(testcase_id)
testcase.regression = 'NA'
error_message = 'Unable to recover from bad build'
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
error_message)
| 40.79235 | 80 | 0.717616 | [
"Apache-2.0"
] | backwardn/clusterfuzz | src/python/bot/tasks/regression_task.py | 14,930 | Python |
# -*- coding: utf-8 -*-
"""
@Remark: Custom viewsets
"""
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.viewsets import ModelViewSet
from utils.filters import DataLevelPermissionsFilter
from utils.jsonResponse import SuccessResponse,ErrorResponse
from utils.permission import CustomPermission
from django.http import Http404
from django.shortcuts import get_object_or_404 as _get_object_or_404
from django.core.exceptions import ValidationError
from utils.exception import APIException
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import IsAuthenticated
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
"""
Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
"""
try:
return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
raise APIException(message='该对象不存在或者无访问权限')
class CustomModelViewSet(ModelViewSet):
"""
    Custom ModelViewSet:
    Unified, standardised response format; create, query and update can each use a different serializer
    (1) ORM performance optimisation: use the values_queryset form wherever possible
    (2) create_serializer_class: the serializer used when creating
    (3) update_serializer_class: the serializer used when updating
"""
values_queryset = None
ordering_fields = '__all__'
create_serializer_class = None
update_serializer_class = None
filter_fields = ()
# filter_fields = '__all__'
search_fields = ()
extra_filter_backends = [DataLevelPermissionsFilter]
permission_classes = [CustomPermission,IsAuthenticated]
filter_backends = [DjangoFilterBackend, OrderingFilter, SearchFilter]
def filter_queryset(self, queryset):
for backend in set(set(self.filter_backends) | set(self.extra_filter_backends or [])):
queryset = backend().filter_queryset(self.request, queryset, self)
return queryset
def get_queryset(self):
if getattr(self, 'values_queryset', None):
return self.values_queryset
return super().get_queryset()
def get_serializer_class(self):
action_serializer_name = f"{self.action}_serializer_class"
action_serializer_class = getattr(self, action_serializer_name, None)
if action_serializer_class:
return action_serializer_class
return super().get_serializer_class()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data, request=request)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return SuccessResponse(data=serializer.data, msg="新增成功")
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, request=request)
return self.get_paginated_response(serializer.data)
# result = self.get_paginated_response(serializer.data)
# print(51,result.data)
# return JsonResponse(code=2000,msg="获取成功", data=result.data)
serializer = self.get_serializer(queryset, many=True, request=request)
return SuccessResponse(data=serializer.data, msg="获取成功")
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return SuccessResponse(data=serializer.data, msg="获取成功")
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, request=request, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return SuccessResponse(data=serializer.data, msg="更新成功")
    # Extend DRF with bulk delete: HTTP method DELETE, e.g. url /api/admin/user/1,2,3/ deletes the users with ids 1, 2 and 3
def get_object_list(self):
queryset = self.filter_queryset(self.get_queryset())
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {f"{self.lookup_field}__in": self.kwargs[lookup_url_kwarg].split(',')}
obj = queryset.filter(**filter_kwargs)
self.check_object_permissions(self.request, obj)
return obj
    # Override the delete method so it supports bulk deletion, e.g. /api/admin/user/1,2,3/ deletes the users with ids 1, 2 and 3
def destroy(self, request, *args, **kwargs):
instance = self.get_object_list()
self.perform_destroy(instance)
return SuccessResponse(data=[], msg="删除成功")
def perform_destroy(self, instance):
instance.delete()
    # Original single-id destroy method
# def destroy(self, request, *args, **kwargs):
# instance = self.get_object()
# self.perform_destroy(instance)
# return SuccessResponse(data=[], msg="删除成功")
    # New bulk delete method
keys = openapi.Schema(description='主键列表', type=openapi.TYPE_ARRAY, items=openapi.TYPE_STRING)
@swagger_auto_schema(request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['keys'],
properties={'keys': keys}
), operation_summary='批量删除')
@action(methods=['delete'], detail=False)
def multiple_delete(self, request, *args, **kwargs):
#print(request.data)
request_data = request.data
keys = request_data.get('keys', None)
if keys:
self.get_queryset().filter(id__in=keys).delete()
return SuccessResponse(data=[], msg="删除成功")
else:
return ErrorResponse(msg="未获取到keys字段")
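# Usage sketch (added for illustration; not part of the original module). A hypothetical
# concrete viewset wired to CustomModelViewSet -- `Book`, `BookSerializer`,
# `BookCreateSerializer` and `BookUpdateSerializer` are assumed placeholders rather than
# real project code, so the example is left commented out.
#
# from .models import Book
# from .serializers import BookSerializer, BookCreateSerializer, BookUpdateSerializer
#
# class BookViewSet(CustomModelViewSet):
#     queryset = Book.objects.all()
#     serializer_class = BookSerializer                 # default for list/retrieve
#     create_serializer_class = BookCreateSerializer    # picked by get_serializer_class() on create
#     update_serializer_class = BookUpdateSerializer    # picked by get_serializer_class() on update
#     filter_fields = ('id',)
#     search_fields = ()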
| 41.337662 | 103 | 0.696199 | [
"Apache-2.0"
] | lybbn/django-vue-lyadmin | backend/utils/viewset.py | 6,746 | Python |
# -*- coding: utf-8 -*-
from base.log import *
import os
def get_url(trackId,trackPointId,type1,seq,imageType):
cmd = 'http://10.11.5.34:13100/krs/image/get?trackPointId=%s&type=%s&seq=%s&imageType=%s' %(trackPointId,type1,seq,imageType)
return cmd
def main():
url = get_url('123', '123', '00', '004', 'jpg')
    print(url)
if __name__ == '__main__':
main()
| 21.470588 | 127 | 0.663014 | [
"MIT"
] | wangzishuo111/doraemon | mesh_krs_imagequery.py | 365 | Python |
enum = 0
enum1 = 0
enum2 = 0
prob = 0
p1 = 0
p2 = 0
parity = 0
for z1 in range(1, 6):
for y1 in range(z1+1, 7):
for z2 in range(1, z1+1):
for y2 in range(z2+1, y1+1):
""" for y2 in range(1, y1):
for z2 in range(y2, z1+1):
for z3 in range(1, z2+1):
if y1 == y2:
enum1 = 1
elif y1 > y2:
enum1 = 2
else:
enum1 = 0
p1 = enum1/36
if z1 == z2 == z3:
enum2 = 1
elif z1 != z2 != z3:
enum2 = 6
else:
enum2 = 3
p2 = enum2/216
enum += enum1 * enum2
prob += p1 * p2 """
# print(y1, z1, y2, z2)
if z1 == z2:
enum1 = 1
elif z1 > z2:
enum1 = 2
else:
enum1 = 0
p1 = enum1 / 36
if y1 == y2:
enum2 = 1
elif y1 > y2:
enum2 = 2
else:
enum2 = 0
p2 = enum2 / 36
enum += enum1 * enum2
                prob += p1 * p2
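# Monte Carlo cross-check (added for illustration; not part of the original script).
# The nested loops above sum P(pair Z shows {z1, z2}) * P(pair Y shows {y1, y2}) over all
# sorted outcomes with y1 > z1 and y2 > z2, i.e. the probability that one pair of dice
# strictly beats another on both the higher and the lower die (ties counting against it) --
# the 2-vs-2 comparison used in Risk-style combat. The sketch below re-estimates the same
# quantity empirically; uncomment the final line to run it.
def _monte_carlo_check(n_samples=200000):
    import random
    hits = 0
    for _ in range(n_samples):
        y = sorted(random.randint(1, 6) for _ in range(2))  # the "y" pair, sorted low/high
        z = sorted(random.randint(1, 6) for _ in range(2))  # the "z" pair, sorted low/high
        if y[1] > z[1] and y[0] > z[0]:
            hits += 1
    return hits / n_samples
# print(_monte_carlo_check())  # should land close to the `prob` printed below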
print(enum, prob)
| 28.981481 | 49 | 0.256869 | [
"Apache-2.0"
] | belerico/spqrisiko-abm | src/compute_probs.py | 1,565 | Python |
import datetime
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, List, Optional
import dateutil.parser
import pytest
import requests
from determined import experimental
from determined.common import api, yaml
from determined.common.api import authentication, certs
from tests import config as conf
from tests.cluster import utils as cluster_utils
def maybe_create_native_experiment(context_dir: str, command: List[str]) -> Optional[int]:
target_env = os.environ.copy()
target_env["DET_MASTER"] = conf.make_master_url()
with subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=context_dir, env=target_env
) as p:
assert p.stdout is not None
for line in p.stdout:
m = re.search(r"Created experiment (\d+)\n", line.decode())
if m is not None:
return int(m.group(1))
return None
def create_native_experiment(context_dir: str, command: List[str]) -> int:
experiment_id = maybe_create_native_experiment(context_dir, command)
if experiment_id is None:
pytest.fail(f"Failed to create experiment in {context_dir}: {command}")
return experiment_id
def maybe_create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> subprocess.CompletedProcess:
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
config_file,
model_def_file,
]
if create_args is not None:
command += create_args
env = os.environ.copy()
env["DET_DEBUG"] = "true"
return subprocess.run(
command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
def create_experiment(
config_file: str, model_def_file: str, create_args: Optional[List[str]] = None
) -> int:
completed_process = maybe_create_experiment(config_file, model_def_file, create_args)
assert completed_process.returncode == 0, "\nstdout:\n{} \nstderr:\n{}".format(
completed_process.stdout, completed_process.stderr
)
m = re.search(r"Created experiment (\d+)\n", str(completed_process.stdout))
assert m is not None
return int(m.group(1))
def pause_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "pause", str(experiment_id)]
subprocess.check_call(command)
def activate_experiment(experiment_id: int) -> None:
command = ["det", "-m", conf.make_master_url(), "experiment", "activate", str(experiment_id)]
subprocess.check_call(command)
def change_experiment_state(experiment_id: int, new_state: str) -> None:
# TODO(DET-5678): refactor tests to not use cli singleton auth.
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.patch(
conf.make_master_url(),
"experiments/{}".format(experiment_id),
headers={"Content-Type": "application/merge-patch+json"},
json={"state": new_state},
)
assert r.status_code == requests.codes.no_content, r.text
def cancel_experiment(experiment_id: int) -> None:
change_experiment_state(experiment_id, "STOPPING_CANCELED")
# We may never observe the STOPPING_CANCELED state.
wait_for_experiment_state(experiment_id, "CANCELED")
def cancel_experiment_v1(experiment_id: int) -> None:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.post(conf.make_master_url(), "/api/v1/experiments/{}/cancel".format(experiment_id))
r.raise_for_status()
wait_for_experiment_state(experiment_id, "CANCELED")
def wait_for_experiment_state(
experiment_id: int,
target_state: str,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
log_every: int = 60,
) -> None:
for seconds_waited in range(max_wait_secs):
try:
state = experiment_state(experiment_id)
# Ignore network errors while polling for experiment state to avoid a
# single network flake to cause a test suite failure. If the master is
# unreachable multiple times, this test will fail after max_wait_secs.
except api.errors.MasterNotFoundException:
logging.warning(
"Network failure ignored when polling for state of "
"experiment {}".format(experiment_id)
)
time.sleep(1)
continue
if state == target_state:
return
if is_terminal_state(state):
if state != target_state:
report_failed_experiment(experiment_id)
pytest.fail(
f"Experiment {experiment_id} terminated in {state} state, expected {target_state}"
)
if seconds_waited > 0 and seconds_waited % log_every == 0:
print(
f"Waited {seconds_waited} seconds for experiment {experiment_id} "
f"(currently {state}) to reach {target_state}"
)
time.sleep(1)
else:
if target_state == "COMPLETED":
cancel_experiment(experiment_id)
report_failed_experiment(experiment_id)
pytest.fail(
"Experiment did not reach target state {} after {} seconds".format(
target_state, max_wait_secs
)
)
def experiment_has_active_workload(experiment_id: int) -> bool:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "tasks").json()
for task in r.values():
if "Experiment {}".format(experiment_id) in task["name"] and len(task["containers"]) > 0:
return True
return False
def wait_for_experiment_active_workload(
experiment_id: int, max_ticks: int = conf.MAX_TASK_SCHEDULED_SECS
) -> None:
for _ in range(conf.MAX_TASK_SCHEDULED_SECS):
if experiment_has_active_workload(experiment_id):
return
time.sleep(1)
pytest.fail(
f"The only trial cannot be scheduled within {max_ticks} seconds.",
)
def wait_for_experiment_workload_progress(
experiment_id: int, max_ticks: int = conf.MAX_TRIAL_BUILD_SECS
) -> None:
for _ in range(conf.MAX_TRIAL_BUILD_SECS):
trials = experiment_trials(experiment_id)
if len(trials) > 0:
only_trial = trials[0]
if len(only_trial["steps"]) > 1:
return
time.sleep(1)
pytest.fail(
f"Trial cannot finish first workload within {max_ticks} seconds.",
)
def experiment_has_completed_workload(experiment_id: int) -> bool:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
trials = experiment_trials(experiment_id)
if not any(trials):
return False
return any(any(s["state"] == "COMPLETED" for s in t["steps"]) for t in trials)
def experiment_json(experiment_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments/{}".format(experiment_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def experiment_state(experiment_id: int) -> str:
state = experiment_json(experiment_id)["state"] # type: str
return state
def experiment_trials(experiment_id: int) -> List[Dict[str, Any]]:
trials = experiment_json(experiment_id)["trials"] # type: List[Dict[str, Any]]
return trials
def num_experiments() -> int:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "experiments")
assert r.status_code == requests.codes.ok, r.text
return len(r.json())
def cancel_single(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def cancel_single_v1(experiment_id: int, should_have_trial: bool = False) -> None:
cancel_experiment_v1(experiment_id)
trials = experiment_trials(experiment_id)
if should_have_trial or len(trials) > 0:
assert len(trials) == 1
trial = trials[0]
assert trial["state"] == "CANCELED"
def is_terminal_state(state: str) -> bool:
return state in ("CANCELED", "COMPLETED", "ERROR")
def trial_metrics(trial_id: int) -> Dict[str, Any]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
r = api.get(conf.make_master_url(), "trials/{}/metrics".format(trial_id))
assert r.status_code == requests.codes.ok, r.text
json = r.json() # type: Dict[str, Any]
return json
def get_flat_metrics(trial_id: int, metric: str) -> List:
full_trial_metrics = trial_metrics(trial_id)
metrics = [m for step in full_trial_metrics["steps"] for m in step["metrics"]["batch_metrics"]]
return [v[metric] for v in metrics]
def num_trials(experiment_id: int) -> int:
return len(experiment_trials(experiment_id))
def num_active_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "ACTIVE" else 0 for t in experiment_trials(experiment_id))
def num_completed_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "COMPLETED" else 0 for t in experiment_trials(experiment_id))
def num_error_trials(experiment_id: int) -> int:
return sum(1 if t["state"] == "ERROR" else 0 for t in experiment_trials(experiment_id))
def trial_logs(trial_id: int) -> List[str]:
certs.cli_cert = certs.default_load(conf.make_master_url())
authentication.cli_auth = authentication.Authentication(conf.make_master_url(), try_reauth=True)
return [tl["message"] for tl in api.trial_logs(conf.make_master_url(), trial_id)]
def check_if_string_present_in_trial_logs(trial_id: int, target_string: str) -> bool:
logs = trial_logs(trial_id)
for log_line in logs:
if target_string in log_line:
return True
return False
def assert_equivalent_trials(A: int, B: int, validation_metrics: List[str]) -> None:
full_trial_metrics1 = trial_metrics(A)
full_trial_metrics2 = trial_metrics(B)
assert len(full_trial_metrics1["steps"]) == len(full_trial_metrics2["steps"])
for step1, step2 in zip(full_trial_metrics1["steps"], full_trial_metrics2["steps"]):
metric1 = step1["metrics"]["batch_metrics"]
metric2 = step2["metrics"]["batch_metrics"]
for batch1, batch2 in zip(metric1, metric2):
assert len(batch1) == len(batch2) == 2
assert batch1["loss"] == pytest.approx(batch2["loss"])
if step1["validation"] is not None or step2["validation"] is not None:
assert step1["validation"] is not None
assert step2["validation"] is not None
for metric in validation_metrics:
val1 = step1.get("validation").get("metrics").get("validation_metrics").get(metric)
val2 = step2.get("validation").get("metrics").get("validation_metrics").get(metric)
assert val1 == pytest.approx(val2)
def assert_performed_initial_validation(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
zeroth_step = steps[0]
assert zeroth_step["validation"] is not None
assert zeroth_step["validation"]["total_batches"] == 0
assert zeroth_step["validation"]["state"] == "COMPLETED"
def assert_performed_final_checkpoint(exp_id: int) -> None:
trials = experiment_trials(exp_id)
assert len(trials) > 0
steps = trials[0]["steps"]
assert len(steps) > 0
last_step = steps[-1]
assert last_step["checkpoint"] is not None
assert last_step["checkpoint"]["state"] == "COMPLETED"
def run_describe_cli_tests(experiment_id: int) -> None:
"""
Runs `det experiment describe` CLI command on a finished
experiment. Will raise an exception if `det experiment describe`
encounters a traceback failure.
"""
# "det experiment describe" without metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
# "det experiment describe" with metrics.
with tempfile.TemporaryDirectory() as tmpdir:
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
str(experiment_id),
"--metrics",
"--outdir",
tmpdir,
]
)
assert os.path.exists(os.path.join(tmpdir, "experiments.csv"))
assert os.path.exists(os.path.join(tmpdir, "workloads.csv"))
assert os.path.exists(os.path.join(tmpdir, "trials.csv"))
def run_list_cli_tests(experiment_id: int) -> None:
"""
Runs list-related CLI commands on a finished experiment. Will raise an
exception if the CLI command encounters a traceback failure.
"""
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-trials", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "list-checkpoints", str(experiment_id)]
)
subprocess.check_call(
[
"det",
"-m",
conf.make_master_url(),
"experiment",
"list-checkpoints",
"--best",
str(1),
str(experiment_id),
]
)
def report_failed_experiment(experiment_id: int) -> None:
trials = experiment_trials(experiment_id)
active = sum(1 for t in trials if t["state"] == "ACTIVE")
paused = sum(1 for t in trials if t["state"] == "PAUSED")
stopping_completed = sum(1 for t in trials if t["state"] == "STOPPING_COMPLETED")
stopping_canceled = sum(1 for t in trials if t["state"] == "STOPPING_CANCELED")
stopping_error = sum(1 for t in trials if t["state"] == "STOPPING_ERROR")
completed = sum(1 for t in trials if t["state"] == "COMPLETED")
canceled = sum(1 for t in trials if t["state"] == "CANCELED")
errored = sum(1 for t in trials if t["state"] == "ERROR")
stopping_killed = sum(1 for t in trials if t["state"] == "STOPPING_KILLED")
print(
f"Experiment {experiment_id}: {len(trials)} trials, {completed} completed, "
f"{active} active, {paused} paused, {stopping_completed} stopping-completed, "
f"{stopping_canceled} stopping-canceled, {stopping_error} stopping-error, "
f"{stopping_killed} stopping-killed, {canceled} canceled, {errored} errored",
file=sys.stderr,
)
for trial in trials:
print_trial_logs(trial["id"])
def report_failed_trial(trial_id: int, state: str) -> None:
print(f"Trial {trial_id} was not COMPLETED but {state}", file=sys.stderr)
print_trial_logs(trial_id)
def print_trial_logs(trial_id: int) -> None:
print("******** Start of logs for trial {} ********".format(trial_id), file=sys.stderr)
print("".join(trial_logs(trial_id)), file=sys.stderr)
print("******** End of logs for trial {} ********".format(trial_id), file=sys.stderr)
def run_basic_test(
config_file: str,
model_def_file: str,
expected_trials: Optional[int],
create_args: Optional[List[str]] = None,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
assert os.path.isdir(model_def_file)
experiment_id = create_experiment(config_file, model_def_file, create_args)
wait_for_experiment_state(experiment_id, "COMPLETED", max_wait_secs=max_wait_secs)
assert num_active_trials(experiment_id) == 0
verify_completed_experiment_metadata(experiment_id, expected_trials)
return experiment_id
def verify_completed_experiment_metadata(
experiment_id: int, num_expected_trials: Optional[int]
) -> None:
    # If `num_expected_trials` is None, the expected number of trials is
# non-deterministic.
if num_expected_trials is not None:
assert num_trials(experiment_id) == num_expected_trials
assert num_completed_trials(experiment_id) == num_expected_trials
# Check that every trial and step is COMPLETED.
trials = experiment_trials(experiment_id)
assert len(trials) > 0
for trial in trials:
if trial["state"] != "COMPLETED":
report_failed_trial(trial["id"], trial["state"])
pytest.fail(f"Trial {trial['id']} was not COMPLETED but {trial['state']}")
assert len(trial["steps"]) > 0
# Check that batches appear in increasing order.
batch_ids = [s["total_batches"] for s in trial["steps"]]
assert all(x <= y for x, y in zip(batch_ids, batch_ids[1:]))
for step in trial["steps"]:
assert step["state"] == "COMPLETED"
if step["validation"]:
validation = step["validation"]
assert validation["state"] == "COMPLETED"
if step["checkpoint"]:
checkpoint = step["checkpoint"]
assert checkpoint["state"] in {"COMPLETED", "DELETED"}
# The last step of every trial should have a checkpoint.
for trial in trials:
last_step = trial["steps"][-1]
assert last_step["checkpoint"]
# When the experiment completes, all slots should now be free. This
# requires terminating the experiment's last container, which might
# take some time.
max_secs_to_free_slots = 30
for _ in range(max_secs_to_free_slots):
if cluster_utils.num_free_slots() == cluster_utils.num_slots():
break
time.sleep(1)
else:
raise AssertionError("Slots failed to free after experiment {}".format(experiment_id))
# Run a series of CLI tests on the finished experiment, to sanity check
# that basic CLI commands don't raise errors.
run_describe_cli_tests(experiment_id)
run_list_cli_tests(experiment_id)
# Use Determined to run an experiment that we expect to fail.
def run_failure_test(
config_file: str, model_def_file: str, error_str: Optional[str] = None
) -> None:
experiment_id = create_experiment(config_file, model_def_file)
wait_for_experiment_state(experiment_id, "ERROR")
# The searcher is configured with a `max_trials` of 8. Since the
# first step of each trial results in an error, there should be no
# completed trials.
#
# Most of the trials should result in ERROR, but depending on that
# seems fragile: if we support task preemption in the future, we
# might start a trial but cancel it before we hit the error in the
# model definition.
assert num_active_trials(experiment_id) == 0
assert num_completed_trials(experiment_id) == 0
assert num_error_trials(experiment_id) >= 1
# For each failed trial, check for the expected error in the logs.
trials = experiment_trials(experiment_id)
for t in trials:
if t["state"] != "ERROR":
continue
trial_id = t["id"]
logs = trial_logs(trial_id)
if error_str is not None:
assert any(error_str in line for line in logs)
def get_validation_metric_from_last_step(
experiment_id: int, trial_id: int, validation_metric_name: str
) -> float:
trial = experiment_trials(experiment_id)[trial_id]
last_validation = trial["steps"][len(trial["steps"]) - 1]["validation"]
return last_validation["metrics"]["validation_metrics"][validation_metric_name] # type: ignore
class ExperimentDurations:
def __init__(
self,
experiment_duration: datetime.timedelta,
training_duration: datetime.timedelta,
validation_duration: datetime.timedelta,
checkpoint_duration: datetime.timedelta,
):
self.experiment_duration = experiment_duration
self.training_duration = training_duration
self.validation_duration = validation_duration
self.checkpoint_duration = checkpoint_duration
def __str__(self) -> str:
duration_strs = []
duration_strs.append(f"experiment duration: {self.experiment_duration}")
duration_strs.append(f"training duration: {self.training_duration}")
duration_strs.append(f"validation duration: {self.validation_duration}")
duration_strs.append(f"checkpoint duration: {self.checkpoint_duration}")
return "\n".join(duration_strs)
def get_experiment_durations(experiment_id: int, trial_idx: int) -> ExperimentDurations:
experiment_metadata = experiment_json(experiment_id)
end_time = dateutil.parser.parse(experiment_metadata["end_time"])
start_time = dateutil.parser.parse(experiment_metadata["start_time"])
experiment_duration = end_time - start_time
training_duration = datetime.timedelta(seconds=0)
validation_duration = datetime.timedelta(seconds=0)
checkpoint_duration = datetime.timedelta(seconds=0)
for step in experiment_metadata["trials"][trial_idx]["steps"]:
end_time = dateutil.parser.parse(step["end_time"])
start_time = dateutil.parser.parse(step["start_time"])
training_duration += end_time - start_time
if "validation" in step and step["validation"]:
end_time = dateutil.parser.parse(step["validation"]["end_time"])
start_time = dateutil.parser.parse(step["validation"]["start_time"])
validation_duration += end_time - start_time
if "checkpoint" in step and step["checkpoint"]:
end_time = dateutil.parser.parse(step["checkpoint"]["end_time"])
start_time = dateutil.parser.parse(step["checkpoint"]["start_time"])
checkpoint_duration += end_time - start_time
return ExperimentDurations(
experiment_duration, training_duration, validation_duration, checkpoint_duration
)
def run_basic_test_with_temp_config(
config: Dict[Any, Any],
model_def_path: str,
expected_trials: Optional[int],
create_args: Optional[List[str]] = None,
max_wait_secs: int = conf.DEFAULT_MAX_WAIT_SECS,
) -> int:
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
experiment_id = run_basic_test(
tf.name,
model_def_path,
expected_trials,
create_args,
max_wait_secs=max_wait_secs,
)
return experiment_id
def run_failure_test_with_temp_config(
config: Dict[Any, Any],
model_def_path: str,
error_str: Optional[str] = None,
) -> None:
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
run_failure_test(tf.name, model_def_path, error_str=error_str)
def shared_fs_checkpoint_config() -> Dict[str, str]:
return {
"type": "shared_fs",
"host_path": "/tmp",
"storage_path": "determined-integration-checkpoints",
}
def s3_checkpoint_config(secrets: Dict[str, str], prefix: Optional[str] = None) -> Dict[str, str]:
config_dict = {
"type": "s3",
"access_key": secrets["INTEGRATIONS_S3_ACCESS_KEY"],
"secret_key": secrets["INTEGRATIONS_S3_SECRET_KEY"],
"bucket": secrets["INTEGRATIONS_S3_BUCKET"],
}
if prefix is not None:
config_dict["prefix"] = prefix
return config_dict
def s3_checkpoint_config_no_creds() -> Dict[str, str]:
return {"type": "s3", "bucket": "determined-ai-examples"}
def root_user_home_bind_mount() -> Dict[str, str]:
return {"host_path": "/tmp", "container_path": "/root"}
def _export_and_load_model(experiment_id: int, master_url: str) -> None:
experimental.Determined(master_url).get_experiment(experiment_id).top_checkpoint().load()
def export_and_load_model(experiment_id: int) -> None:
# We run this in a subprocess to avoid module name collisions
# when performing checkpoint export of different models.
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(
target=_export_and_load_model,
args=(
experiment_id,
conf.make_master_url(),
),
)
p.start()
p.join()
assert p.exitcode == 0, p.exitcode
| 35.463788 | 100 | 0.669049 | [
"Apache-2.0"
] | liamcli/determined | e2e_tests/tests/experiment/experiment.py | 25,463 | Python |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Reading outputs from E+
# <codecell>
# some initial set up
# if you have not installed epp, and only downloaded it
# you will need the following lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
sys.path.append(pathnameto_eppy)
# <headingcell level=2>
# Using titletable() to get at the tables
# <markdowncell>
# So far we have been making changes to the IDF input file.
# How about looking at the outputs.
#
# Energyplus makes nice htmlout files that look like this.
# <codecell>
from eppy import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet1) #display the image below
# <markdowncell>
# If you look at the clipping of the html file above, you see tables with data in them. Eppy has functions that let you access these tables and get the data from any of their cells.
#
# Let us say you want to find the "Net Site Energy".
#
# This is in table "Site and Source Energy".
#
# The number you want is in the third row, second column and it's value is "47694.47"
#
# Let us use eppy to extract this number
# <codecell>
from eppy import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_7_2/5ZoneCAVtoVAVWarmestTempFlowTable_ABUPS.html" # the html file you want to read
filehandle = open(fname, 'r').read() # get a file handle to the html file
htables = readhtml.titletable(filehandle) # reads the tables with their titles
# <markdowncell>
# If you open the python file readhtml.py and look at the function titletable, you can see the function documentation.
#
# It says the following
# <rawcell>
# """return a list of [(title, table), .....]
# title = previous item with a <b> tag
# table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]"""
#
# <markdowncell>
# The documentation says that it returns a list.
# Let us take a look inside this list.
# Let us look at the first item in the list.
# <codecell>
firstitem = htables[0]
print(firstitem)
# <markdowncell>
# Ughh !!! that is ugly. Hard to see what it is.
# Let us use a python module to print it pretty
# <codecell>
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(firstitem)
# <markdowncell>
# Nice. that is a little clearer
# <codecell>
firstitem_title = firstitem[0]
pp.pprint(firstitem_title)
# <codecell>
firstitem_table = firstitem[1]
pp.pprint(firstitem_table)
# <markdowncell>
# How do we get to the value of "Net Site Energy"?
# We know it is in the third row, second column of the table.
#
# Easy.
# <codecell>
thirdrow = firstitem_table[2] # we start counting with 0. So 0, 1, 2 is third row
print(thirdrow)
# <codecell>
thirdrow_secondcolumn = thirdrow[1]
thirdrow_secondcolumn
# <markdowncell>
# the text from the html table is in unicode.
# That is why you see that weird 'u' letter.
#
# Let us convert it to a floating point number
# <codecell>
net_site_energy = float(thirdrow_secondcolumn)
net_site_energy
# <markdowncell>
# Let us have a little fun with the tables.
#
# Get the titles of all the tables
# <codecell>
alltitles = [htable[0] for htable in htables]
alltitles
# <markdowncell>
# Now let us grab the tables with the titles "Building Area" and "Site to Source Energy Conversion Factors"
# <markdowncell>
# twotables = [htable for htable in htables if htable[0] in ["Building Area", "Site to Source Energy Conversion Factors"]]
# twotables
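# <markdowncell>
# (Added example) The commented snippet above can also be run as a code cell. It reuses the `htables` list we read earlier and assumes both of those table titles are present in this output file.
# <codecell>
twotables = [htable for htable in htables
             if htable[0] in ["Building Area", "Site to Source Energy Conversion Factors"]]
[twotable[0] for twotable in twotables]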
# <markdowncell>
# Let us leave titletable() for now.
#
# It gives us the basic functionality to read any of the tables in the html output file.
# <headingcell level=2>
# Using lines_table() to get at the tables
# <markdowncell>
# We have been using titletable() to get at the tables. There is a constraint when using the function titletable(): it assumes that there is a unique title (in HTML bold) just above the table, and that this title will adequately describe the table. This is true in most cases and titletable() is perfectly good to use. Unfortunately there are some tables that do not follow this rule. The snippet below shows one of them.
# <codecell>
from eppy import ex_inits #no need to know this code, it just shows the image below
for_images = ex_inits
for_images.display_png(for_images.html_snippet2) # display the image below
# <markdowncell>
# Notice that the HTML snippet shows a table with three lines above it. The first two lines have information that describes the table. We need to look at both those lines to understand what the table contains. So we need a different function that will capture all those lines before the table. The function lines_table() described below will do this.
# <codecell>
from eppy import readhtml # the eppy module with functions to read the html
fname = "../eppy/resources/outputfiles/V_8_1/ASHRAE30pct.PI.Final11_OfficeMedium_STD2010_Chicago-baseTable.html" # the html file you want to read
filehandle = open(fname, 'r').read() # get a file handle to the html file
ltables = readhtml.lines_table(filehandle) # reads the tables with their titles
# <markdowncell>
# The html snippet shown above is the last table in the HTML file we just opened. We have used lines_table() to read the tables into the variable ltables. We can get to the last table with ltables[-1]. Let us print it and see what we have.
# <codecell>
import pprint
pp = pprint.PrettyPrinter()
pp.pprint(ltables[-1])
# <markdowncell>
# We can see that ltables has captured all the lines before the table. Let us make our code more explicit to see this
# <codecell>
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
pp.pprint(lines_before_table)
# <markdowncell>
# We found this table the easy way this time, because we knew it was the last one. How do we find it if we don't know where it is in the file ? Python comes to our rescue :-) Let us assume that we want to find the table that has the following two lines before it.
#
# - Report: FANGER DURING COOLING AND ADAPTIVE COMFORT
# - For: PERIMETER_MID_ZN_4
# <codecell>
line1 = 'Report: FANGER DURING COOLING AND ADAPTIVE COMFORT'
line2 = 'For: PERIMETER_MID_ZN_4'
#
# check if those two lines are before the table
line1 in lines_before_table and line2 in lines_before_table
# <codecell>
# find all the tables where those two lines are before the table
[ltable for ltable in ltables
if line1 in ltable[0] and line2 in ltable[0]]
# <markdowncell>
# That worked !
#
# What if you want to find the words "FANGER" and "PERIMETER_MID_ZN_4" before the table. The following code will do it.
# <codecell>
# sample code to illustrate what we are going to do
last_ltable = ltables[-1]
lines_before_table = last_ltable[0]
table_itself = last_ltable[-1]
# join lines_before_table into a paragraph of text
justtext = '\n'.join(lines_before_table)
print(justtext)
# <codecell>
"FANGER" in justtext and "PERIMETER_MID_ZN_4" in justtext
# <codecell>
# Let us combine the this trick to find the table
[ltable for ltable in ltables
if "FANGER" in '\n'.join(ltable[0]) and "PERIMETER_MID_ZN_4" in '\n'.join(ltable[0])]
# <headingcell level=2>
# Extracting data from the tables
# <markdowncell>
# The tables in the HTML page in general have text in the top header row. The first column also has text. The remaining cells have numbers. We can identify the numbers we need by looking at the label in the top row and the label in the first column. Let us construct a simple example and explore this.
# <codecell>
# ignore the following three lines. I am using them to construct the table below
from IPython.display import HTML
atablestring = '<TABLE cellpadding="4" style="border: 1px solid #000000; border-collapse: collapse;" border="1">\n <TR>\n <TD> </TD>\n <TD>a b</TD>\n <TD>b c</TD>\n <TD>c d</TD>\n </TR>\n <TR>\n <TD>x y</TD>\n <TD>1</TD>\n <TD>2</TD>\n <TD>3</TD>\n </TR>\n <TR>\n <TD>y z</TD>\n <TD>4</TD>\n <TD>5</TD>\n <TD>6</TD>\n </TR>\n <TR>\n <TD>z z</TD>\n <TD>7</TD>\n <TD>8</TD>\n <TD>9</TD>\n </TR>\n</TABLE>'
HTML(atablestring)
# <markdowncell>
# This table is actually in the following form:
# <codecell>
atable = [["", "a b", "b c", "c d"],
["x y", 1, 2, 3 ],
["y z", 4, 5, 6 ],
["z z", 7, 8, 9 ],]
# <markdowncell>
# We can see the labels in the table. So we can look at row "x y" and column "c d". The value there is 3
# <markdowncell>
# right now we can get to it by saying atable[1][3]
# <codecell>
print(atable[1][3])
# <markdowncell>
# readhtml has some functions that will let us address the values by the labels. We use a structure from Python called named tuples to do this. The only limitation is that the labels have to be letters or digits. Named tuples do not allow spaces in the labels. We could replace the space with an underscore '_'. So "a b" will become "a_b". So we can look for row "x_y" and column "c_d". Let us try this out.
# <codecell>
from eppy import readhtml
h_table = readhtml.named_grid_h(atable)
# <codecell>
print(h_table.x_y.c_d)
# <markdowncell>
# We can still get to the value by index
# <codecell>
print(h_table[0][2])
# <markdowncell>
# Note that we used atable[1][3], but here we used h_table[0][2]. That is because h_table does not count the rows and columns where the labels are.
# <markdowncell>
# We can also do the following:
# <codecell>
print(h_table.x_y[2])
# or
print(h_table[0].c_d)
# <markdowncell>
# Wow … that is pretty cool. What if we want to just check what the labels are ?
# <codecell>
print(h_table._fields)
# <markdowncell>
# That gives us the horizontal labels. How about the vertical labels ?
# <codecell>
h_table.x_y._fields
# <markdowncell>
# There you go !!!
# <markdowncell>
# How about if I want to use the labels differently ? Say I want to refer to the row first and then to the column. That would be saying table.c_d.x_y. We can do that by using a different function.
# <codecell>
v_table = readhtml.named_grid_v(atable)
print(v_table.c_d.x_y)
# <markdowncell>
# And we can do the following
# <codecell>
print(v_table[2][0])
print(v_table.c_d[0])
print(v_table[2].x_y)
# <markdowncell>
# Let us try to get the numbers in the first column and then get their sum
# <codecell>
v_table.a_b
# <markdowncell>
# Looks like we got the right column. But not in the right format. We really need a list of numbers
# <codecell>
[cell for cell in v_table.a_b]
# <markdowncell>
# That looks like what we wanted. Now let us get the sum
# <codecell>
values_in_first_column = [cell for cell in v_table.a_b]
print(values_in_first_column)
print(sum(values_in_first_column)) # sum is a builtin function that will sum a list
# <markdowncell>
# To get the first row we use the variable h_table
# <codecell>
values_in_first_row = [cell for cell in h_table.x_y]
print(values_in_first_row)
print(sum(values_in_first_row))
# <codecell>
| 27.046569 | 430 | 0.720616 | [
"MIT"
] | lymereJ/eppy | docs/Outputs_Tutorial.py | 11,037 | Python |
#!/usr/bin/python3
import time
from calcul import *
import sys
import os
max_exec = 10
red = "\033[31m"
white = "\033[39m"
cyan = "\033[36m"
green = "\033[32m"
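# keep a handle on the real stdout and redirect prints to file.log;
# main() switches back to the real stdout before printing the summary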
save = sys.stdout
so = open("file.log", 'w')
sys.stdout = so
def rectangle_time(n):
time_rect = []
i = 0
while i < max_exec:
start_time = time.time()
calcul_rectangles(n)
time_rect.append(time.time() - start_time)
i += 1
return time_rect
def trapeze_time(n):
time_trap = []
i = 0
while i < max_exec:
start_time = time.time()
calcul_trapezoïds(n)
time_trap.append(time.time() - start_time)
i += 1
return time_trap
def simpson_time(n):
time_simp = []
i = 0
while i < max_exec:
start_time = time.time()
calcul_simpson(n)
time_simp.append(time.time() - start_time)
i += 1
return time_simp
def calc_dict(tab, name):
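    # average the recorded times over max_exec runs and package the mean with the method name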
i = 0
result = 0
dic = {}
while i < max_exec:
result += tab[i]
i += 1
result = result / max_exec
dic["Name"] = name
dic["Value"] = result
return dic
def get_min_time(dict1, dict2, dict3):
if dict1.get("Value") < dict2.get("Value") and dict1.get("Value") < dict3.get("Value"):
return 1
if dict2.get("Value") < dict1.get("Value") and dict2.get("Value") < dict3.get("Value"):
return 2
if dict3.get("Value") < dict2.get("Value") and dict3.get("Value") < dict1.get("Value"):
return 3
def get_min_precision(prec1, prec2, prec3):
prec1 = abs(prec1)
prec2 = abs(prec2)
prec3 = abs(prec3)
if prec1 < prec2 and prec1 < prec3:
return 1
if prec2 < prec1 and prec2 < prec3:
return 2
if prec3 < prec2 and prec3 < prec1:
return 3
def main():
n = int(sys.argv[1])
time_rect = rectangle_time(n)
time_trap = trapeze_time(n)
time_simp = simpson_time(n)
dict_rect = calc_dict(time_rect, "Rectangles")
dict_trap = calc_dict(time_trap, "Trapezoids")
dict_simp = calc_dict(time_simp, "Simpson")
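    # deviation of each method's result from the reference value pi / 2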
preci_rect = calcul_rectangles(n) - (pi / 2)
preci_trap = calcul_trapezoïds(n) - (pi / 2)
preci_simp = calcul_simpson(n) - (pi / 2)
sys.stdout = save
print("{}Compute time:\n{}".format(cyan, white))
print("Method : {}\t: {}{:.6f}{} sec".format(dict_rect.get("Name"), red, dict_rect.get("Value"), white))
print("Method : {}\t: {}{:.6f}{} sec".format(dict_trap.get("Name"), red, dict_trap.get("Value"), white))
print("Method : {}\t: {}{:.6f}{} sec".format(dict_simp.get("Name"), red, dict_simp.get("Value"), white))
min_time = get_min_time(dict_rect, dict_trap, dict_simp)
print("The fastest Method is:", end='')
print(green, end='')
if min_time == 1:
print("\tRectangles Method")
elif min_time == 2:
print("\tTrapezoids Method")
else:
print("\tSimpson Method")
print(white, end='')
print("\n{}Relative precision:\n{}".format(cyan, white))
print("Method : {}\t: {}{}{} a.u.".format(dict_rect.get("Name"), red, preci_rect, white))
print("Method : {}\t: {}{}{} a.u.".format(dict_trap.get("Name"), red, preci_trap, white))
print("Method : {}\t: {}{}{} a.u.".format(dict_simp.get("Name"), red, preci_simp, white))
preci = get_min_precision(preci_rect, preci_trap, preci_simp)
print("The most accurate:", end='')
print(green, end='')
if preci == 1:
print("\tRectangles Method")
elif preci == 2:
print("\tTrapezoids Method")
else:
print("\tSimpson Method")
print(white, end='')
main()
| 29.883333 | 108 | 0.594255 | [
"MIT"
] | ltabis/epitech-projects | 110borwein_2017/compare.py | 3,588 | Python |
# -*- coding: utf-8 -*-
#
# pysteps documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 31 01:11:37 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'numpydoc',
'sphinxcontrib.bibtex']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'source/index'
# General information about the project.
project = u'pysteps'
copyright = u'2018, Seppo Pulkkinen, Daniele Nerini and Loris Foresti'
author = u'Seppo Pulkkinen, Daniele Nerini and Loris Foresti'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2'
# The full version, including alpha/beta/rc tags.
release = u'0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
html_domain_indices = True
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystepsdoc'
# -- Options for LaTeX output ---------------------------------------------
# This hack is taken from numpy (https://github.com/numpy/numpy/blob/master/doc/source/conf.py).
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
latex_elements = {
'papersize': 'a4paper',
'pointsize': '10pt',
'preamble': latex_preamble
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_domain_indices = False
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pysteps.tex', u'pysteps Reference',
u'Seppo Pulkkinen, Daniele Nerini and Loris Foresti', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysteps', u'pysteps Reference',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pysteps', u'pysteps Reference',
author, 'pysteps', 'One line description of project.',
'Miscellaneous'),
]
| 32.284211 | 96 | 0.691066 | [
"BSD-3-Clause",
"MIT"
] | RubenImhoff/Large_Sample_Nowcasting_Evaluation | pysteps/doc/conf.py | 6,134 | Python |
import typing
import strawberry
def test_fetch_entities():
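    # Product is promoted to module scope for the duration of this test (likely so the
    # List[Product] annotation can be resolved when the schema is built); it is removed
    # again with `del Product` at the end of the test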
global Product
@strawberry.federation.type(keys=["upc"])
class Product:
upc: str
@classmethod
def resolve_reference(cls, upc):
return Product(upc)
@strawberry.federation.type(extend=True)
class Query:
@strawberry.field
def top_products(self, first: int) -> typing.List[Product]:
return []
schema = strawberry.federation.Schema(query=Query)
query = """
query ($representations: [_Any!]!) {
_entities(representations: $representations) {
... on Product {
upc
}
}
}
"""
result = schema.execute_sync(
query,
variable_values={
"representations": [{"__typename": "Product", "upc": "B00005N5PF"}]
},
)
assert not result.errors
assert result.data == {"_entities": [{"upc": "B00005N5PF"}]}
del Product
| 21.446809 | 79 | 0.549603 | [
"MIT"
] | patrick91/strawberry | tests/federation/test_entities.py | 1,008 | Python |
# Automatically generated from poetry/pyproject.toml
# flake8: noqa
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['c7n_trailcreator']
package_data = \
{'': ['*']}
install_requires = \
['argcomplete (>=1.11.1,<2.0.0)',
'attrs (>=19.3.0,<20.0.0)',
'boto3 (>=1.12.20,<2.0.0)',
'botocore (>=1.15.20,<2.0.0)',
'c7n (>=0.9.0,<0.10.0)',
'c7n-org (>=0.5.7,<0.6.0)',
'click (>=7.1.1,<8.0.0)',
'click>=7.0,<8.0',
'docutils (>=0.15.2,<0.16.0)',
'importlib-metadata (>=1.5.0,<2.0.0)',
'jmespath (>=0.9.5,<0.10.0)',
'jsonschema (>=3.2.0,<4.0.0)',
'pyrsistent (>=0.15.7,<0.16.0)',
'python-dateutil (>=2.8.1,<3.0.0)',
'pyyaml (>=5.3,<6.0)',
's3transfer (>=0.3.3,<0.4.0)',
'six (>=1.14.0,<2.0.0)',
'tabulate (>=0.8.6,<0.9.0)',
'urllib3 (>=1.25.8,<2.0.0)',
'zipp (>=3.1.0,<4.0.0)']
entry_points = \
{'console_scripts': ['c7n-trailcreator = c7n_trailcreator.trailcreator:cli']}
setup_kwargs = {
'name': 'c7n-trailcreator',
'version': '0.1.5',
'description': 'Cloud Custodian - Retroactive Tag Resource Creators from CloudTrail',
'long_description': '# c7n-trailcreator: Retroactive Resource Creator Tagging\n\nThis script will process cloudtrail records to create a sqlite db of\nresources and their creators, and then use that sqlitedb to tag\nthe resources with their creator\'s name.\n\nIn processing cloudtrail it can use either Athena or S3 Select. A\nconfig file of the events and resources of interest is required.\n\n## Install\n\n```shell\n$ pip install c7n_trailcreator\n\n$ c7n-trailcreator --help\n```\n\n## Config File\n\nThe config file format here is similiar to what custodian requires\nfor lambda policies on cloudtrail api events as an event selector.\n\nFirst for each resource, the custodian resource-type is required\nto be specified, and then for each event, we need to know the\nname of the service, the event name, and a jmespath expression\nto get the resource ids.\n\nHere\'s a a few examples, covering iam-user, iam-role, and and an s3 bucket.\n\n\n```json\n{\n "resources": [\n {\n "resource": "iam-role",\n "events": [\n {\n "event": "CreateRole",\n "ids": "requestParameters.roleName",\n "service": "iam.amazonaws.com"\n }\n ]\n },\n {\n "resource": "s3",\n "events": [\n {\n "ids": "requestParameters.bucketName",\n "event": "CreateBucket",\n "service": "s3.amazonaws.com"\n }\n ]\n },\n {\n "resource": "iam-user",\n "events": [\n {\n "event": "CreateUser",\n "ids": "requestParameters.userName",\n "service": "iam.amazonaws.com"\n }\n ]\n }]\n}\n```\n\n## Athena Usage\n\nTrail creators supports loading data from s3 using s3 select or from cloudtrail s3 using athena.\n\nNote you\'ll have to pre-created the athena table for cloudtrail previously per\nhttps://docs.aws.amazon.com/athena/latest/ug/cloudtrail-logs.html\n\nLet\'s use the example config file to load up data for all the roles, buckets, and users created in 2019\n\n```\nc7n-trailcreator load-athena \\\n --region us-east-1 \\\n\t--resource-map resource_map.json \\\n\t--table cloudtrail_logs_custodian_skunk_trails \\\n\t--db "creators.db" \\\n\t--year 2019\n```\n\nBy default we\'ll use the default s3 athena output used by the console,\nand the default db and primary workgroup, you can pass all of these in\non the cli to be more explicit.\n\nYou can also specify to just process a month with `--month 2019/11` or\nan individual day with `--day 2019/02/01`\n\n```\nINFO:c7n_trailowner:Athena query:569712dc-d1e9-4474-b86f-6579c53b5b46\nINFO:c7n_trailowner:Polling athena query progress scanned:489.24 Mb qexec:28.62s\nINFO:c7n_trailowner:Polling athena query progress scanned:1.29 Gb qexec:88.96s\nINFO:c7n_trailowner:Polling athena query progress scanned:2.17 Gb qexec:141.16s\nINFO:c7n_trailowner:processing athena result page 78 records\nINFO:c7n_trailowner:Athena Processed 78 records\n```\n\nNote you can reprocess a completed query\'s results, by passing in `--query-id` on the cli.\n\n## Tagging\n\nIt supports this across all the resources that custodian supports.\n\n```\n$ c7n-trailcreator tag \\\n\t--db creators.db \\\n\t--creator-tag Owner \\\n\t--region us-east-1\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 13 iam-role resources users:5 population:97 not-found:84 records:124\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 5 iam-user resources users:4 population:6 not-found:1 records:18\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 9 s3 resources users:4 population:14 not-found:5 records:20\nINFO:c7n_trailowner:auto tag summary account:644160558196 region:us-east-1\n iam-role-not-found: 84\n iam-role: 13\n 
iam-user-not-found: 1\n iam-user: 5\n s3-not-found: 5\n s3: 9\nINFO:c7n_trailowner:Total resources tagged: 27\n```\n\nlet\'s break down one of these log messages\n\n```\nINFO:c7n_trailowner:account:644160558196 region:us-east-1 tag 13 iam-role resources users:5 population:97 not-found:84 records:124\n```\n\n- records: the count of database create events we have for this resource type.\n- users: the number of unique users for whom we have create events.\n- not-found: the number of resources for whom we do not have create events, ie created before or after our trail analysis period.\n- population: the total number of resources in the account region.\n\n## Multi Account / Multi Region\n\nc7n-trailcreator supports executing across multiple accounts and regions when tagging\nusing the same file format that c7n-org uses to denote accounts. See `tag-org` subcommand.\n\n',
'long_description_content_type': 'text/markdown',
'author': 'Cloud Custodian Project',
'author_email': None,
'maintainer': None,
'maintainer_email': None,
'url': 'https://cloudcustodian.io',
'packages': packages,
'package_data': package_data,
'install_requires': install_requires,
'entry_points': entry_points,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
| 107.122807 | 4,623 | 0.698002 | [
"Apache-2.0"
] | rushrecon/cloud-custodian | tools/c7n_trailcreator/setup.py | 6,106 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""A collection of ORM sqlalchemy models for SQL Lab"""
from datetime import datetime
import re
from flask import Markup
from flask_appbuilder import Model
import sqlalchemy as sqla
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
Text,
)
from sqlalchemy.orm import backref, relationship
from superset import security_manager
from superset.models.helpers import AuditMixinNullable, ExtraJSONMixin
from superset.models.tags import QueryUpdater
from superset.utils.core import QueryStatus, user_label
class Query(Model, ExtraJSONMixin):
"""ORM model for SQL query
Now that SQL Lab support multi-statement execution, an entry in this
table may represent multiple SQL statements executed sequentially"""
__tablename__ = "query"
id = Column(Integer, primary_key=True)
client_id = Column(String(11), unique=True, nullable=False)
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
# Store the tmp table into the DB only if the user asks for it.
tmp_table_name = Column(String(256))
user_id = Column(Integer, ForeignKey("ab_user.id"), nullable=True)
status = Column(String(16), default=QueryStatus.PENDING)
tab_name = Column(String(256))
sql_editor_id = Column(String(256))
schema = Column(String(256))
sql = Column(Text)
# Query to retrieve the results,
# used only in case of select_as_cta_used is true.
select_sql = Column(Text)
executed_sql = Column(Text)
# Could be configured in the superset config.
limit = Column(Integer)
select_as_cta = Column(Boolean)
select_as_cta_used = Column(Boolean, default=False)
progress = Column(Integer, default=0) # 1..100
# # of rows in the result set or rows modified.
rows = Column(Integer)
error_message = Column(Text)
# key used to store the results in the results backend
results_key = Column(String(64), index=True)
# Using Numeric in place of DateTime for sub-second precision
# stored as seconds since epoch, allowing for milliseconds
start_time = Column(Numeric(precision=20, scale=6))
start_running_time = Column(Numeric(precision=20, scale=6))
end_time = Column(Numeric(precision=20, scale=6))
end_result_backend_time = Column(Numeric(precision=20, scale=6))
tracking_url = Column(Text)
changed_on = Column(
DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True
)
database = relationship(
"Database",
foreign_keys=[database_id],
backref=backref("queries", cascade="all, delete-orphan"),
)
user = relationship(security_manager.user_model, foreign_keys=[user_id])
__table_args__ = (sqla.Index("ti_user_id_changed_on", user_id, changed_on),)
def to_dict(self):
return {
"changedOn": self.changed_on,
"changed_on": self.changed_on.isoformat(),
"dbId": self.database_id,
"db": self.database.database_name,
"endDttm": self.end_time,
"errorMessage": self.error_message,
"executedSql": self.executed_sql,
"id": self.client_id,
"limit": self.limit,
"progress": self.progress,
"rows": self.rows,
"schema": self.schema,
"ctas": self.select_as_cta,
"serverId": self.id,
"sql": self.sql,
"sqlEditorId": self.sql_editor_id,
"startDttm": self.start_time,
"state": self.status.lower(),
"tab": self.tab_name,
"tempTable": self.tmp_table_name,
"userId": self.user_id,
"user": user_label(self.user),
"resultsKey": self.results_key,
"trackingUrl": self.tracking_url,
"extra": self.extra,
}
@property
def name(self):
"""Name property"""
ts = datetime.now().isoformat()
ts = ts.replace("-", "").replace(":", "").split(".")[0]
tab = self.tab_name.replace(" ", "_").lower() if self.tab_name else "notab"
tab = re.sub(r"\W+", "", tab)
return f"sqllab_{tab}_{ts}"
@property
def database_name(self):
return self.database.name
@property
def username(self):
return self.user.username
class SavedQuery(Model, AuditMixinNullable, ExtraJSONMixin):
"""ORM model for SQL query"""
__tablename__ = "saved_query"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("ab_user.id"), nullable=True)
db_id = Column(Integer, ForeignKey("dbs.id"), nullable=True)
schema = Column(String(128))
label = Column(String(256))
description = Column(Text)
sql = Column(Text)
user = relationship(
security_manager.user_model,
backref=backref("saved_queries", cascade="all, delete-orphan"),
foreign_keys=[user_id],
)
database = relationship(
"Database",
foreign_keys=[db_id],
backref=backref("saved_queries", cascade="all, delete-orphan"),
)
@property
def pop_tab_link(self):
return Markup(
f"""
<a href="/metrix/sqllab?savedQueryId={self.id}">
<i class="fa fa-link"></i>
</a>
"""
)
@property
def user_email(self):
return self.user.email
@property
def sqlalchemy_uri(self):
return self.database.sqlalchemy_uri
def url(self):
return "/metrix/sqllab?savedQueryId={0}".format(self.id)
# events for updating tags
sqla.event.listen(SavedQuery, "after_insert", QueryUpdater.after_insert)
sqla.event.listen(SavedQuery, "after_update", QueryUpdater.after_update)
sqla.event.listen(SavedQuery, "after_delete", QueryUpdater.after_delete)
| 33.958974 | 83 | 0.663395 | [
"Apache-2.0"
] | Zandut/Superset-Funnel | superset/models/sql_lab.py | 6,622 | Python |
import random
import time
def dead_state(width, height):
board = []
line = []
for i in range(width):
for j in range(height):
line.append(0)
board.append(line)
line = []
return board
def random_state(width, height):
state = dead_state(width, height)
for i in range(width):
for j in range(height):
state[i][j] = 1 if random.random() >= 0.5 else 0
return state
def render(state):
term_print = ''
for i in range(len(state[:])):
for j in range(len(state[i][:])):
if state[i][j] == 1:
term_print += '#'
else:
term_print += ' '
term_print += "\n"
print(term_print)
def next_state(state):
    # board dimensions: number of rows and number of columns in the list of lists
    num_rows = len(state)
    num_cols = len(state[0])
    test_state = dead_state(num_rows, num_cols)
for i in range(len(state[:])):
for j in range(len(state[i][:])):
# Alive cell
if state[i][j] == 1:
test_state[i][j] = alive_cell(i,j,state)
# Dead cell
else:
test_state[i][j] = dead_cell(i,j,state)
return test_state
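# Conway's rules: a live cell stays alive with exactly 2 or 3 live neighbours,
# a dead cell becomes alive with exactly 3 live neighbours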
def alive_cell(i,j,state):
alive = 0
    num_rows = len(state)
    num_cols = len(state[0])
    # count live neighbours in the 3x3 neighbourhood centred on (i, j)
    for row in range(i-1,i+2):
        for column in range(j-1,j+2):
            # print('\t\talive',row,column)
            if row < 0 or row >= num_rows:
                # row is off the board
                continue
            if column < 0 or column >= num_cols:
                # column is off the board
                continue
if state[row][column] == 1:
alive += 1
# print('\talive',row,column)
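    # the loop above counted the centre cell itself, so remove it from the neighbour count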
alive -= 1
# print('alive', alive)
if alive == 2 or alive == 3:
# current cell stays alive
return 1
else:
# current cell dies
return 0
def dead_cell(i,j,state):
alive = 0
    num_rows = len(state)
    num_cols = len(state[0])
    for row in range(i-1,i+2):
        for column in range(j-1,j+2):
            # print('\t\tdead',row,column)
            if row < 0 or row >= num_rows:
                # row is off the board
                continue
            if column < 0 or column >= num_cols:
                # column is off the board
                continue
if state[row][column] == 1:
alive += 1
# print('\tdead',row,column)
# print('dead', alive)
if alive == 3:
# current cell revives
return 1
else:
# current cell stays dead
return 0
def load_board_state(location):
board = []
x = []
with open(location, 'r') as f:
for line in f:
for ch in line:
if ch == '\n':
continue
x.append(int(ch))
board.append(x)
x = []
return board
if __name__ == '__main__':
loaded_board = load_board_state('./toad.txt')
render(loaded_board)
flag = False
while(True):
time.sleep(0.5)
if flag == False:
next_board = next_state(loaded_board)
render(next_board)
flag = True
else:
next_board = next_state(next_board)
render(next_board)
# init_state = random_state(25,25)
# render(init_state)
# count = 0
# while(True):
# # Wait for 1 second
# time.sleep(.5)
# if count == 0:
# next_board = next_state(init_state)
# render(next_board)
# count = 1
# else:
# next_board = next_state(next_board)
# render(next_board)
| 26.839161 | 74 | 0.496352 | [
"MIT"
] | Joes-BitGit/LearnPython | Projects/Game of Life/gol.py | 3,838 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 15:56:35 2019
@author: logancross
"""
from mvpa2.suite import *
from os import listdir
import time
def make_targets(subj, glm_ds_file, mask_name, runs2use, class_dict, homedir, ana_name):
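    """Build a PyMVPA fmri_dataset whose targets are the CS trial categories
    (mapped through class_dict) read from the per-run GLM timing files,
    keeping only the first runs2use runs (60 trials per run)."""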
start_time = time.time()
print 'Starting making targets',time.time() - start_time
onsets_folder = homedir+'DATA/brain/MODELS/RSA/'+ana_name+'/sub-'+subj+'/glm/timing/'
trial_list = []
trial_categ_list = []
chunks_list = []
for run in range(1,4):
temp_folder = onsets_folder+ana_name+'_run-0'+str(run)
csm_onsets = np.genfromtxt(temp_folder+'_CS_CSm.txt')
cs_deval_onsets = np.genfromtxt(temp_folder+'_CS_deval.txt')
cs_val_onsets = np.genfromtxt(temp_folder+'_CS_val.txt')
#get timing for all conditions and sort by this timing
timing = np.concatenate((csm_onsets[:,0], cs_deval_onsets[:,0], cs_val_onsets[:,0]))
#add a list of trial category as a sample attribute
trial_categ_unsort = [['csm' for c in range(len(csm_onsets))],['cs_deval' for c in range(len(cs_deval_onsets))],['cs_val' for c in range(len(cs_val_onsets))]]
trial_categ_unsort = [item for sublist in trial_categ_unsort for item in sublist]
#sort by trial timing and append to lists
sort_time_inds = np.argsort(timing)
all_trials = np.concatenate((csm_onsets, cs_deval_onsets, cs_val_onsets))
all_trials = all_trials[sort_time_inds,:]
trial_list.append(all_trials)
trial_categ = [trial_categ_unsort[ind] for ind in sort_time_inds]
trial_categ_list.append(trial_categ)
chunks = run*np.ones([len(all_trials)])
chunks_list.append(chunks)
#unroll lists of lists to one list
trials_allruns = np.asarray([item for sublist in trial_list for item in sublist])
trial_categ_allruns = [item for sublist in trial_categ_list for item in sublist]
chunks_allruns = np.asarray([item for sublist in chunks_list for item in sublist]).astype(int)
cs_classes = [class_dict[trial] for trial in trial_categ_allruns]
#load fmri dataset with these values as targets
fds = fmri_dataset(samples=glm_ds_file, targets=cs_classes, chunks=chunks_allruns, mask=mask_name)
fds.sa['trial_type'] = trial_categ_allruns
fds_subset = fds[:runs2use*60,:]
print 'Finished making targets',time.time() - start_time
#return fds_subset, trial_categ_allruns[:runs2use*60]
return fds_subset
def make_targets2(subj, glm_ds_file, mask_name, runs2use, class_dict):
start_time = time.time()
print 'Starting making targets',time.time() - start_time
onsets_folder = '/Users/logancross/Documents/EvaPavlovian/analysis/timing_files2/sub-'+subj+'/'
trial_list = []
trial_categ_list = []
chunks_list = []
for run in range(1,4):
temp_folder = onsets_folder+'GLM-02_run-0'+str(run)
csm_onsets = np.genfromtxt(temp_folder+'_CS_CSm.txt')
cs_deval_L_onsets = np.genfromtxt(temp_folder+'_CS_deval_L.txt')
cs_deval_R_onsets = np.genfromtxt(temp_folder+'_CS_deval_R.txt')
cs_val_L_onsets = np.genfromtxt(temp_folder+'_CS_val_L.txt')
cs_val_R_onsets = np.genfromtxt(temp_folder+'_CS_val_R.txt')
#get timing for all conditions and sort by this timing
timing = np.concatenate((csm_onsets[:,0], cs_deval_L_onsets[:,0], cs_deval_R_onsets[:,0], cs_val_L_onsets[:,0], cs_val_R_onsets[:,0]))
#add a list of trial category as a sample attribute
trial_categ_unsort = [['csm' for c in range(len(csm_onsets))],['cs_deval_L' for c in range(len(cs_deval_L_onsets))],['cs_deval_R' for c in range(len(cs_deval_R_onsets))],
['cs_val_L' for c in range(len(cs_val_L_onsets))], ['cs_val_R' for c in range(len(cs_val_R_onsets))]]
trial_categ_unsort = [item for sublist in trial_categ_unsort for item in sublist]
#sort by trial timing and append to lists
sort_time_inds = np.argsort(timing)
all_trials = np.concatenate((csm_onsets, cs_deval_L_onsets, cs_deval_R_onsets, cs_val_L_onsets, cs_val_R_onsets))
all_trials = all_trials[sort_time_inds,:]
trial_list.append(all_trials)
trial_categ = [trial_categ_unsort[ind] for ind in sort_time_inds]
trial_categ_list.append(trial_categ)
chunks = run*np.ones([len(all_trials)])
chunks_list.append(chunks)
#unroll lists of lists to one list
trials_allruns = np.asarray([item for sublist in trial_list for item in sublist])
trial_categ_allruns = [item for sublist in trial_categ_list for item in sublist]
chunks_allruns = np.asarray([item for sublist in chunks_list for item in sublist]).astype(int)
cs_classes = [class_dict[trial] for trial in trial_categ_allruns]
#load fmri dataset with these values as targets
fds = fmri_dataset(samples=glm_ds_file, targets=cs_classes, chunks=chunks_allruns, mask=mask_name)
fds_subset = fds[:runs2use*60,:]
print 'Finished making targets',time.time() - start_time
return fds_subset
def plot_mtx(mtx, labels, title, skip=5):
# little helper function to plot dissimilarity matrices
# if using correlation-distance, we use colorbar range of [0,2]
pl.figure()
pl.imshow(mtx, interpolation='nearest')
pl.xticks(range(len(mtx))[::skip], labels[::skip], rotation=90)
pl.yticks(range(len(mtx))[::skip], labels[::skip])
pl.title(title)
pl.clim((0, 2))
pl.colorbar()
class CrossDecodingFilter(Node):
def __init__(self, target_groups, part_attr, target_attr,
space='filtered_partitions', **kwargs):
self._target_groups = target_groups
self._target_attr = target_attr
self._part_attr = part_attr
Node.__init__(self, space=space, **kwargs)
def generate(self, ds):
        # binary mask for training and testing portion
train_part = ds.sa[self._part_attr].value == 1
test_part = ds.sa[self._part_attr].value == 2
# binary mask for the first and second target group
match_1st_group = [t in self._target_groups[0]
for t in ds.sa[self._target_attr].value]
match_2nd_group = [t in self._target_groups[1]
for t in ds.sa[self._target_attr].value]
match_3rd_group = [t in self._target_groups[2]
for t in ds.sa[self._target_attr].value]
# in the first to-be-returned dataset we will blank out
# group1 in the training set and group2 in the testing set
#LOGAN: we will also blank out group 3 in the testing set since we only want to train on it
# Note: setting the partition value to zero, will cause the Splitter
# employed in the CrossValidation Measure to ignore the corresponding
# samples
new_part = ds.sa[self._part_attr].value.copy()
new_part[np.logical_and(train_part, match_1st_group)] = 0
new_part[np.logical_and(test_part, match_2nd_group)] = 0
new_part[np.logical_and(test_part, match_3rd_group)] = 0
ds.sa[self.get_space()] = new_part
yield ds
# in the second to-be-returned dataset we will blank out
# group2 in the training set and group1 in the testing set
new_part = ds.sa[self._part_attr].value.copy()
new_part[np.logical_and(train_part, match_2nd_group)] = 0
new_part[np.logical_and(test_part, match_1st_group)] = 0
new_part[np.logical_and(test_part, match_3rd_group)] = 0
ds.sa[self.get_space()] = new_part
yield ds
| 46.90303 | 178 | 0.680708 | [
"CC0-1.0"
] | munoztd0/OBIWAN | ANALYSIS/T0/MVPA/PYmvpa/cross_decoding/mvpa_utils_pav.py | 7,739 | Python |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)  # no base_name needed because the viewset provides a queryset
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
]
| 32.470588 | 108 | 0.768116 | [
"MIT"
] | ncadet-dev/profiles-rest-api | profiles_api/urls.py | 552 | Python |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""User profiles module for Invenio."""
from __future__ import absolute_import, print_function
from . import config
from .api import current_userprofile
class InvenioUserProfiles(object):
"""Invenio-UserProfiles extension."""
def __init__(self, app=None):
"""Extension initialization."""
if app:
self.init_app(app)
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
# Register current_profile
app.context_processor(lambda: dict(
current_userprofile=current_userprofile))
app.extensions['invenio-userprofiles'] = self
def init_config(self, app):
"""Initialize configuration."""
excludes = [
'USERPROFILES_BASE_TEMPLATE',
'USERPROFILES_SETTINGS_TEMPLATE',
]
for k in dir(config):
if k.startswith('USERPROFILES_') and k not in excludes:
app.config.setdefault(k, getattr(config, k))
app.config.setdefault('USERPROFILES', True)
app.config.setdefault(
'USERPROFILES_BASE_TEMPLATE',
app.config.get('BASE_TEMPLATE',
'invenio_userprofiles/base.html'))
app.config.setdefault(
'USERPROFILES_SETTINGS_TEMPLATE',
app.config.get('SETTINGS_TEMPLATE',
'invenio_userprofiles/settings/base.html'))
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
app.config.setdefault(
'USERPROFILES_REGISTER_USER_BASE_TEMPLATE',
app.config.get(
'SECURITY_REGISTER_USER_TEMPLATE',
'invenio_accounts/register_user.html'
)
)
app.config['SECURITY_REGISTER_USER_TEMPLATE'] = \
'invenio_userprofiles/register_user.html'
| 31.686567 | 72 | 0.617051 | [
"MIT"
] | 0x2b3bfa0/invenio-userprofiles | invenio_userprofiles/ext.py | 2,123 | Python |
from onegov.election_day.collections.data_sources import DataSourceCollection
from onegov.election_day.collections.data_sources import \
DataSourceItemCollection
from onegov.election_day.collections.notifications import \
NotificationCollection
from onegov.election_day.collections.archived_results import \
ArchivedResultCollection, SearchableArchivedResultCollection
from onegov.election_day.collections.screens import ScreenCollection
from onegov.election_day.collections.subscribers import \
EmailSubscriberCollection
from onegov.election_day.collections.subscribers import \
SmsSubscriberCollection
from onegov.election_day.collections.subscribers import SubscriberCollection
from onegov.election_day.collections.upload_tokens import UploadTokenCollection
__all__ = [
'ArchivedResultCollection',
'DataSourceCollection',
'DataSourceItemCollection',
'EmailSubscriberCollection',
'NotificationCollection',
'ScreenCollection',
'SearchableArchivedResultCollection',
'SmsSubscriberCollection',
'SubscriberCollection',
'UploadTokenCollection',
]
| 38.137931 | 79 | 0.833635 | [
"MIT"
] | politbuero-kampagnen/onegov-cloud | src/onegov/election_day/collections/__init__.py | 1,106 | Python |
from flask import Flask
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from config import config_options
bootstrap = Bootstrap()
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
mail = Mail()
def create_app(config_name):
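    """Application factory: create and configure the Flask app for the given config name."""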
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
bootstrap.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
login_manager.init_app(app)
mail.init_app(app)
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix="/authenticate")
return app
| 23.97619 | 70 | 0.771599 | [
"MIT"
] | Benardakaka/Blog-Site | app/__init__.py | 1,007 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'ActiveDirectoryArgs',
'ExportPolicyRuleArgs',
'VolumePropertiesExportPolicyArgs',
]
@pulumi.input_type
class ActiveDirectoryArgs:
def __init__(__self__, *,
active_directory_id: Optional[pulumi.Input[str]] = None,
dns: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
organizational_unit: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
smb_server_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Active Directory
:param pulumi.Input[str] active_directory_id: Id of the Active Directory
:param pulumi.Input[str] dns: Comma separated list of DNS server IP addresses for the Active Directory domain
:param pulumi.Input[str] domain: Name of the Active Directory domain
:param pulumi.Input[str] organizational_unit: The Organizational Unit (OU) within the Windows Active Directory
:param pulumi.Input[str] password: Plain text password of Active Directory domain administrator
:param pulumi.Input[str] smb_server_name: NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
:param pulumi.Input[str] status: Status of the Active Directory
:param pulumi.Input[str] username: Username of Active Directory domain administrator
"""
if active_directory_id is not None:
pulumi.set(__self__, "active_directory_id", active_directory_id)
if dns is not None:
pulumi.set(__self__, "dns", dns)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if organizational_unit is not None:
pulumi.set(__self__, "organizational_unit", organizational_unit)
if password is not None:
pulumi.set(__self__, "password", password)
if smb_server_name is not None:
pulumi.set(__self__, "smb_server_name", smb_server_name)
if status is not None:
pulumi.set(__self__, "status", status)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="activeDirectoryId")
def active_directory_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Active Directory
"""
return pulumi.get(self, "active_directory_id")
@active_directory_id.setter
def active_directory_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "active_directory_id", value)
@property
@pulumi.getter
def dns(self) -> Optional[pulumi.Input[str]]:
"""
Comma separated list of DNS server IP addresses for the Active Directory domain
"""
return pulumi.get(self, "dns")
@dns.setter
def dns(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Active Directory domain
"""
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter(name="organizationalUnit")
def organizational_unit(self) -> Optional[pulumi.Input[str]]:
"""
The Organizational Unit (OU) within the Windows Active Directory
"""
return pulumi.get(self, "organizational_unit")
@organizational_unit.setter
def organizational_unit(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organizational_unit", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Plain text password of Active Directory domain administrator
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="smbServerName")
def smb_server_name(self) -> Optional[pulumi.Input[str]]:
"""
NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes
"""
return pulumi.get(self, "smb_server_name")
@smb_server_name.setter
def smb_server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "smb_server_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Status of the Active Directory
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
Username of Active Directory domain administrator
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class ExportPolicyRuleArgs:
def __init__(__self__, *,
allowed_clients: Optional[pulumi.Input[str]] = None,
cifs: Optional[pulumi.Input[bool]] = None,
nfsv3: Optional[pulumi.Input[bool]] = None,
nfsv4: Optional[pulumi.Input[bool]] = None,
rule_index: Optional[pulumi.Input[int]] = None,
unix_read_only: Optional[pulumi.Input[bool]] = None,
unix_read_write: Optional[pulumi.Input[bool]] = None):
"""
Volume Export Policy Rule
:param pulumi.Input[str] allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
:param pulumi.Input[bool] cifs: Allows CIFS protocol
:param pulumi.Input[bool] nfsv3: Allows NFSv3 protocol
:param pulumi.Input[bool] nfsv4: Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
:param pulumi.Input[int] rule_index: Order index
:param pulumi.Input[bool] unix_read_only: Read only access
:param pulumi.Input[bool] unix_read_write: Read and write access
"""
if allowed_clients is not None:
pulumi.set(__self__, "allowed_clients", allowed_clients)
if cifs is not None:
pulumi.set(__self__, "cifs", cifs)
if nfsv3 is not None:
pulumi.set(__self__, "nfsv3", nfsv3)
if nfsv4 is not None:
pulumi.set(__self__, "nfsv4", nfsv4)
if rule_index is not None:
pulumi.set(__self__, "rule_index", rule_index)
if unix_read_only is not None:
pulumi.set(__self__, "unix_read_only", unix_read_only)
if unix_read_write is not None:
pulumi.set(__self__, "unix_read_write", unix_read_write)
@property
@pulumi.getter(name="allowedClients")
def allowed_clients(self) -> Optional[pulumi.Input[str]]:
"""
Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names
"""
return pulumi.get(self, "allowed_clients")
@allowed_clients.setter
def allowed_clients(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "allowed_clients", value)
@property
@pulumi.getter
def cifs(self) -> Optional[pulumi.Input[bool]]:
"""
Allows CIFS protocol
"""
return pulumi.get(self, "cifs")
@cifs.setter
def cifs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cifs", value)
@property
@pulumi.getter
def nfsv3(self) -> Optional[pulumi.Input[bool]]:
"""
Allows NFSv3 protocol
"""
return pulumi.get(self, "nfsv3")
@nfsv3.setter
def nfsv3(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nfsv3", value)
@property
@pulumi.getter
def nfsv4(self) -> Optional[pulumi.Input[bool]]:
"""
Deprecated: Will use the NFSv4.1 protocol, please use swagger version 2019-07-01 or later
"""
return pulumi.get(self, "nfsv4")
@nfsv4.setter
def nfsv4(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nfsv4", value)
@property
@pulumi.getter(name="ruleIndex")
def rule_index(self) -> Optional[pulumi.Input[int]]:
"""
Order index
"""
return pulumi.get(self, "rule_index")
@rule_index.setter
def rule_index(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "rule_index", value)
@property
@pulumi.getter(name="unixReadOnly")
def unix_read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Read only access
"""
return pulumi.get(self, "unix_read_only")
@unix_read_only.setter
def unix_read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unix_read_only", value)
@property
@pulumi.getter(name="unixReadWrite")
def unix_read_write(self) -> Optional[pulumi.Input[bool]]:
"""
Read and write access
"""
return pulumi.get(self, "unix_read_write")
@unix_read_write.setter
def unix_read_write(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "unix_read_write", value)
@pulumi.input_type
class VolumePropertiesExportPolicyArgs:
def __init__(__self__, *,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None):
"""
Set of export policy rules
:param pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]] rules: Export policy rule
"""
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]:
"""
Export policy rule
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]]):
pulumi.set(self, "rules", value)
| 36.328859 | 168 | 0.638925 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/netapp/v20190601/_inputs.py | 10,826 | Python |
from disco.test import TestCase, TestJob
from disco.compat import bytes_to_str
class SimpleJob(TestJob):
@staticmethod
def map(e, params):
yield int(e), (bytes_to_str(e)).strip()
@staticmethod
def reduce(iter, out, params):
for k, v in sorted(iter):
out.add(k, v)
class SimplerJob(SimpleJob):
@staticmethod
def reduce(iter, params):
return sorted(iter)
class SimpleTestCase(TestCase):
input = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
def answers(self):
return ((i, str(i)) for i in self.input for x in range(10))
def serve(self, path):
return '\n'.join([path] * 10)
def test_simple(self):
self.job = SimpleJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
def test_simpler(self):
self.job = SimplerJob().run(input=self.test_server.urls(self.input))
self.assertResults(self.job, self.answers())
| 27.8 | 76 | 0.634121 | [
"BSD-3-Clause"
] | DavidAlphaFox/disco | tests/test_simple.py | 973 | Python |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vmc.orgs.sddcs.networks.edges.firewall.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Config(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ConfigStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
org,
sddc,
edge_id,
):
"""
Delete firewall configuration for a management or compute gateway (NSX
Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('delete',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
})
def get(self,
org,
sddc,
edge_id,
):
"""
Retrieve the firewall configuration for a management or compute gateway
(NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallConfig`
:return: com.vmware.vmc.model.FirewallConfig
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('get',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
})
def update(self,
org,
sddc,
edge_id,
firewall_config,
):
"""
Configure firewall for a management or compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type firewall_config: :class:`com.vmware.vmc.model_client.FirewallConfig`
:param firewall_config: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('update',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
'firewall_config': firewall_config,
})
class Statistics(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.statistics'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _StatisticsStub)
self._VAPI_OPERATION_IDS = {}
def get(self,
org,
sddc,
edge_id,
rule_id,
):
"""
Retrieve statistics for a specific firewall rule for a management or
compute gateway (NSX Edge).
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Edge Identifier. (required)
:type rule_id: :class:`long`
:param rule_id: Rule Identifier. (required)
:rtype: :class:`com.vmware.vmc.model_client.FirewallRuleStats`
:return: com.vmware.vmc.model.FirewallRuleStats
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('get',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
'rule_id': rule_id,
})
class _ConfigStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
'firewall_config': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallConfig'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/config',
request_body_parameter='firewall_config',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallConfig'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _StatisticsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
'rule_id': type.IntegerType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/edges/{edgeId}/firewall/statistics/{ruleId}',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
'rule_id': 'ruleId',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.vmc.model_client', 'FirewallRuleStats'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vmc.orgs.sddcs.networks.edges.firewall.statistics',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'Config': Config,
'Statistics': Statistics,
'config': 'com.vmware.vmc.orgs.sddcs.networks.edges.firewall.config_client.StubFactory',
}
| 38.201018 | 117 | 0.563112 | [
"MIT"
] | adammillerio/vsphere-automation-sdk-python | com/vmware/vmc/orgs/sddcs/networks/edges/firewall_client.py | 15,013 | Python |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for the pages for subtopics, and related models."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.domain import change_domain
from core.domain import html_validation_service
from core.domain import state_domain
from core.platform import models
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html'
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio'
SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations'
CMD_CREATE_NEW = 'create_new'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property'
class SubtopicPageChange(change_domain.BaseChange):
"""Domain object for changes made to subtopic_page object.
The allowed commands, together with the attributes:
- 'create_new' (with topic_id, subtopic_id)
- 'update_subtopic_page_property' (
with property_name, new_value, old_value, subtopic_id).
"""
# The allowed list of subtopic page properties which can be used in
# update_subtopic_page_property command.
SUBTOPIC_PAGE_PROPERTIES = (
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML,
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO,
SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS)
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': ['topic_id', 'subtopic_id'],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'required_attribute_names': [
'property_name', 'new_value', 'old_value', 'subtopic_id'],
'optional_attribute_names': [],
'user_id_attribute_names': [],
'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES}
}]
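    # Illustrative sketch (not from the original file): a change dict that
    # satisfies the 'update_subtopic_page_property' command above might look
    # like this (values are hypothetical):
    #
    #     {
    #         'cmd': 'update_subtopic_page_property',
    #         'property_name': 'page_contents_html',
    #         'old_value': <old subtitled_html dict>,
    #         'new_value': <new subtitled_html dict>,
    #         'subtopic_id': 1,
    #     }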
class SubtopicPageContents(python_utils.OBJECT):
"""Domain object for the contents on a subtopic page."""
def __init__(
self, subtitled_html, recorded_voiceovers, written_translations):
"""Constructs a SubtopicPageContents domain object.
Args:
subtitled_html: SubtitledHtml. The html data being displayed on
the page.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the subtopic page content and their translations in different
languages.
written_translations: WrittenTranslations. The text translations of
the subtopic page content.
"""
self.subtitled_html = subtitled_html
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
def validate(self):
"""Validates the SubtopicPageContentsObject, verifying that all
fields are of the correct type.
"""
self.subtitled_html.validate()
content_ids = set([self.subtitled_html.content_id])
self.recorded_voiceovers.validate(content_ids)
self.written_translations.validate(content_ids)
@classmethod
def create_default_subtopic_page_contents(cls):
"""Creates a default subtopic page contents object.
Returns:
SubtopicPageContents. A default object.
"""
content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID
return cls(
state_domain.SubtitledHtml.create_default_subtitled_html(
content_id),
state_domain.RecordedVoiceovers.from_dict(
{'voiceovers_mapping': {content_id: {}}}),
state_domain.WrittenTranslations.from_dict(
{'translations_mapping': {content_id: {}}}))
def to_dict(self):
"""Returns a dict representing this SubtopicPageContents domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPageContents instance.
"""
return {
'subtitled_html': self.subtitled_html.to_dict(),
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict()
}
@classmethod
def from_dict(cls, page_contents_dict):
"""Creates a subtopic page contents object from a dictionary.
Args:
page_contents_dict: dict. The dict representation of
SubtopicPageContents object.
Returns:
SubtopicPageContents. The corresponding object.
"""
page_contents = state_domain.SubtitledHtml.from_dict(
page_contents_dict['subtitled_html'])
page_contents.validate()
return cls(
page_contents,
state_domain.RecordedVoiceovers.from_dict(page_contents_dict[
'recorded_voiceovers']),
state_domain.WrittenTranslations.from_dict(page_contents_dict[
'written_translations']))
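    # Illustrative sketch (hypothetical content; exact sub-dict keys come from
    # state_domain and are an assumption here): the dict round-tripped by
    # to_dict()/from_dict() has the shape
    #
    #     {
    #         'subtitled_html': {'content_id': 'content', 'html': '<p>...</p>'},
    #         'recorded_voiceovers': {'voiceovers_mapping': {'content': {}}},
    #         'written_translations': {'translations_mapping': {'content': {}}},
    #     }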
class SubtopicPage(python_utils.OBJECT):
"""Domain object for a Subtopic page."""
def __init__(
self, subtopic_page_id, topic_id, page_contents,
page_contents_schema_version, language_code, version):
"""Constructs a SubtopicPage domain object.
Args:
subtopic_page_id: str. The unique ID of the subtopic page.
topic_id: str. The ID of the topic that this subtopic is a part of.
page_contents: SubtopicPageContents. The html and audio
translations to be surfaced to the learner.
page_contents_schema_version: int. The schema version for the page
contents object.
language_code: str. The ISO 639-1 code for the language this
subtopic page is written in.
version: int. The current version of the subtopic.
"""
self.id = subtopic_page_id
self.topic_id = topic_id
self.page_contents = page_contents
self.page_contents_schema_version = page_contents_schema_version
self.language_code = language_code
self.version = version
def to_dict(self):
"""Returns a dict representing this SubtopicPage domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPage instance.
"""
return {
'id': self.id,
'topic_id': self.topic_id,
'page_contents': self.page_contents.to_dict(),
'page_contents_schema_version': self.page_contents_schema_version,
'language_code': self.language_code,
'version': self.version
}
@classmethod
def get_subtopic_page_id(cls, topic_id, subtopic_id):
"""Returns the subtopic page id from the topic_id and subtopic_id.
Args:
topic_id: str. The id of the topic that the subtopic is a part of.
subtopic_id: int. The id of the subtopic.
Returns:
str. The subtopic_page_id calculated from the given values.
"""
return '%s-%s' % (topic_id, subtopic_id)
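    # For example (illustrative values): get_subtopic_page_id('topic1', 2)
    # returns 'topic1-2'.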
@classmethod
def create_default_subtopic_page(cls, subtopic_id, topic_id):
"""Creates a SubtopicPage object with default values.
Args:
subtopic_id: str. ID of the subtopic.
            topic_id: str. The ID of the topic to which this page is
                linked.
Returns:
            SubtopicPage. A subtopic page object with the given id, topic_id
                and default page contents field.
"""
subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id)
return cls(
subtopic_page_id, topic_id,
SubtopicPageContents.create_default_subtopic_page_contents(),
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
constants.DEFAULT_LANGUAGE_CODE, 0)
@classmethod
def convert_html_fields_in_subtopic_page_contents(
cls, subtopic_page_contents_dict, conversion_fn):
"""Applies a conversion function on all the html strings in subtopic
page contents to migrate them to a desired state.
Args:
subtopic_page_contents_dict: dict. The dict representation of
subtopic page contents.
conversion_fn: function. The conversion function to be applied on
the subtopic_page_contents_dict.
Returns:
dict. The converted subtopic_page_contents_dict.
"""
subtopic_page_contents_dict['written_translations'] = (
state_domain.WrittenTranslations.
convert_html_in_written_translations(
subtopic_page_contents_dict['written_translations'],
conversion_fn))
subtopic_page_contents_dict['subtitled_html']['html'] = (
conversion_fn(
subtopic_page_contents_dict['subtitled_html']['html']))
return subtopic_page_contents_dict
@classmethod
def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict):
"""Converts v1 SubtopicPage Contents schema to the v2 schema.
v2 schema introduces the new schema for Math components.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
"""
return cls.convert_html_fields_in_subtopic_page_contents(
page_contents_dict,
html_validation_service.add_math_content_to_math_rte_components)
@classmethod
def _convert_page_contents_v2_dict_to_v3_dict(cls, page_contents_dict):
"""Converts v2 SubtopicPage Contents schema to the v3 schema.
        v3 schema deprecates the oppia-noninteractive-svgdiagram tag and
        converts existing occurrences of it to the oppia-noninteractive-image tag.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
"""
return cls.convert_html_fields_in_subtopic_page_contents(
page_contents_dict,
html_validation_service.convert_svg_diagram_tags_to_image_tags)
@classmethod
def _convert_page_contents_v3_dict_to_v4_dict(cls, page_contents_dict):
"""Converts v3 SubtopicPage Contents schema to the v4 schema.
v4 schema fixes HTML encoding issues.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
"""
return cls.convert_html_fields_in_subtopic_page_contents(
page_contents_dict,
html_validation_service.fix_incorrectly_encoded_chars)
@classmethod
def update_page_contents_from_model(
cls, versioned_page_contents, current_version):
"""Converts the page_contents blob contained in the given
versioned_page_contents dict from current_version to
current_version + 1. Note that the versioned_page_contents being
passed in is modified in-place.
Args:
versioned_page_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
page_contents dict.
- page_contents: dict. The dict comprising the subtopic page
contents.
current_version: int. The current schema version of page_contents.
"""
versioned_page_contents['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
versioned_page_contents['page_contents'] = conversion_fn(
versioned_page_contents['page_contents'])
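    # Illustrative sketch (hypothetical values): given
    #     versioned_page_contents = {'schema_version': 1, 'page_contents': {...}}
    # a call to update_page_contents_from_model(versioned_page_contents, 1)
    # mutates it in place to
    #     {'schema_version': 2,
    #      'page_contents': <output of _convert_page_contents_v1_dict_to_v2_dict>}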
def get_subtopic_id_from_subtopic_page_id(self):
"""Returns the id from the subtopic page id of the object.
Returns:
int. The subtopic_id of the object.
"""
return int(self.id[len(self.topic_id) + 1:])
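    # For example (illustrative values): with id 'topic1-2' and topic_id
    # 'topic1', get_subtopic_id_from_subtopic_page_id() returns 2.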
def update_page_contents_html(self, new_page_contents_html):
"""The new value for the html data field.
Args:
new_page_contents_html: SubtitledHtml. The new html for the subtopic
page.
"""
self.page_contents.subtitled_html = new_page_contents_html
def update_page_contents_audio(self, new_page_contents_audio):
"""The new value for the recorded_voiceovers data field.
Args:
new_page_contents_audio: RecordedVoiceovers. The new audio for
the subtopic page.
"""
self.page_contents.recorded_voiceovers = new_page_contents_audio
def update_page_contents_written_translations(
self, new_page_written_translations_dict):
"""The new value for the written_translations data field.
Args:
new_page_written_translations_dict: dict. The new translation for
the subtopic page.
"""
self.page_contents.written_translations = (
state_domain.WrittenTranslations.from_dict(
new_page_written_translations_dict))
def validate(self):
"""Validates various properties of the SubtopicPage object.
Raises:
ValidationError. One or more attributes of the subtopic page are
invalid.
"""
if not isinstance(self.topic_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected topic_id to be a string, received %s' %
self.topic_id)
if not isinstance(self.version, int):
raise utils.ValidationError(
'Expected version number to be an int, received %s' %
self.version)
self.page_contents.validate()
if not isinstance(self.page_contents_schema_version, int):
raise utils.ValidationError(
'Expected page contents schema version to be an integer, '
'received %s' % self.page_contents_schema_version)
if (
self.page_contents_schema_version !=
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected page contents schema version to be %s, received %s'
% (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
self.page_contents_schema_version)
)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not any(
self.language_code == lc['code']
for lc in constants.SUPPORTED_CONTENT_LANGUAGES
):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
| 39.339109 | 80 | 0.664758 | [
"Apache-2.0"
] | 5andeepNambiar/oppia | core/domain/subtopic_page_domain.py | 15,893 | Python |
import pytest
def test_cython_api_deprecation():
match = ("`scipy._lib._test_deprecation_def.foo_deprecated` "
"is deprecated, use `foo` instead!\n"
"Deprecated in Scipy 42.0.0")
with pytest.warns(DeprecationWarning, match=match):
from .. import _test_deprecation_call
assert _test_deprecation_call.call() == (1, 1)
| 33.090909 | 65 | 0.678571 | [
"BSD-3-Clause"
] | 0x0L/scipy | scipy/_lib/tests/test_deprecation.py | 364 | Python |
import urllib.request
import json
response = urllib.request.urlopen('https://raw.githubusercontent.com/Kitware/ParaView/master/ParaViewCore/' +
'ServerManager/Rendering/ColorMaps.json')
data = json.loads(response.read().decode('utf8'))
file = open('paraview_color_maps.py', 'w')
for item in data:
if 'RGBPoints' in item:
name = item['Name'].replace(' ', '_').replace('-', '_').replace('(', '').replace(')', '').replace('2', 'two_')\
.replace(',', '')
name = name[:1].upper() + name[1:]
file.write(name + ' = [\n')
list_ = [item['RGBPoints'][i:i + 4] for i in range(0, len(item['RGBPoints']), 4)]
for p in list_:
file.write(' ' + str(p[0]) + ', ' + str(p[1]) + ', ' + str(p[2]) + ', ' + str(p[3]) + ',\n')
file.write(']\n\n')
file.close()
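# Illustrative sketch (name and numbers below are made up): each entry written
# to paraview_color_maps.py is a flat list of (position, r, g, b) quadruples,
# e.g.
#
#     Erdc_rainbow_bright = [
#         0.0, 0.2196, 0.1961, 0.9412,
#         0.5, 0.0745, 0.9019, 0.9294,
#         1.0, 0.9000, 0.1000, 0.1000,
#     ]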
| 34.48 | 119 | 0.525522 | [
"MIT"
] | Gulaabihaathee/K3D-jupyter | k3d/colormaps/generate_praview_color_maps.py | 862 | Python |
def tsd_section_name(pagename):
articlename = pagename.split('/')[1]
sectionname_new = ''
if 'ДО' in pagename.split('/'):
sectionname_new = articlename
else:
if articlename.endswith((' 1', ' 2', ' 3', ' 4')):
sectionname_new = articlename + '-1'
else:
sectionname_new = articlename + '1'
return sectionname_new
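# Illustrative examples (hypothetical page names):
#     tsd_section_name('Foo/Арба/ДО') -> 'Арба'     (old-orthography page)
#     tsd_section_name('Foo/Арба')    -> 'Арба1'
#     tsd_section_name('Foo/Арба 2')  -> 'Арба 2-1'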
def tsd_calc_pagenum_offset(indexpage):
offsets = {
'Толковый словарь. Том 1 (Даль 1903).djvu': 17,
'Толковый словарь. Том 2 (Даль 1905).djvu': 2,
'Толковый словарь. Том 3 (Даль 1907).djvu': 2,
'Толковый словарь. Том 4 (Даль 1909).djvu': 4,
'Толковый словарь Даля (2-е издание). Том 1 (1880).pdf': 90,
'Толковый словарь Даля (2-е издание). Том 2 (1881).pdf': 9,
'Толковый словарь Даля (2-е издание). Том 3 (1882).pdf': 8,
'Толковый словарь Даля (2-е издание). Том 4 (1882).pdf': 8,
'Толковый словарь Даля (1-е издание). Часть 1 (1863).pdf': 2,
'Толковый словарь Даля (1-е издание). Часть 2 (1865).pdf': -626,
'Толковый словарь Даля (1-е издание). Часть 3 (1865).pdf': 1,
'Толковый словарь Даля (1-е издание). Часть 4 (1866).pdf': 3,
}
return offsets[indexpage]
def tsd_calc_volume(indexpage, edition, page_data):
v = None
volumes = [
{'Толковый словарь Даля (1-е издание). Часть 1 (1863).pdf': 1,
'Толковый словарь Даля (1-е издание). Часть 2 (1865).pdf': 2,
'Толковый словарь Даля (1-е издание). Часть 3 (1865).pdf': 3,
'Толковый словарь Даля (1-е издание). Часть 4 (1866).pdf': 4, },
{'Толковый словарь Даля (2-е издание). Том 1 (1880).pdf': 1,
'Толковый словарь Даля (2-е издание). Том 2 (1881).pdf': 2,
'Толковый словарь Даля (2-е издание). Том 3 (1882).pdf': 3,
'Толковый словарь Даля (2-е издание). Том 4 (1882).pdf': 4, },
{'Толковый словарь. Том 1 (Даль 1903).djvu': 1,
'Толковый словарь. Том 2 (Даль 1905).djvu': 2,
'Толковый словарь. Том 3 (Даль 1907).djvu': 3,
'Толковый словарь. Том 4 (Даль 1909).djvu': 4, }
]
try:
v = volumes[edition - 1][string_strip(indexpage)]
except:
        print('Error determining the volume from the index-page name in the <pages> tag: %s' % page_data['pagename'])
pass
return v
def string_strip(s):
return str(s).replace('\u200e', '').replace('‎', '').replace('‎', '').strip()
| 42.603448 | 119 | 0.591259 | [
"MIT"
] | vladiscripts/WS_text_formatter_and_uploader_to_Page_NS | scripts/tsd.py | 3,202 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import gradient_descent
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
try:
return pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", name,
ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
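# Illustrative usage sketch (assumes eager execution and two matmul-compatible
# tensors `a` and `b`): c_tfe_py_fastpath_execute(a, b, transpose_b=True)
# mirrors math_ops.matmul(a, b, transpose_b=True) but dispatches through the
# TFE_Py_FastPathExecute prototype directly, which is what the
# *_tfe_py_fastpath_execute_* benchmarks below measure.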
class SubclassedKerasModel(keras.Model):
def __init__(self, initializer="ones"):
super(SubclassedKerasModel, self).__init__()
self._can_use_graph_functions = True
self.layer_a = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_b = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_c = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_d = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")
self.layer_e = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros")
def call(self, x):
x = self.layer_a(x)
x = self.layer_b(x)
x = self.layer_c(x)
x = self.layer_d(x)
return self.layer_e(x)
def make_keras_model(initializer="ones"):
model_input = keras.Input(shape=(10,))
x = keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros")(model_input)
x = keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros")(x)
x = keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros")(x)
return keras.Model(inputs=model_input, outputs=x)
def make_sequential_keras_model(initializer="ones"):
model = keras.models.Sequential()
model.add(keras.layers.Dense(
64, kernel_initializer=initializer, bias_initializer="zeros",
input_shape=(10,)))
model.add(keras.layers.Dense(
128, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
256, kernel_initializer=initializer, bias_initializer="zeros"))
model.add(keras.layers.Dense(
10, kernel_initializer=initializer, bias_initializer="zeros"))
return model
class MicroBenchmarks(test.Benchmark):
def __init__(self):
# used for multiply benchmarks
self._m_2 = random_ops.random_uniform([2])
# used for matmul benchmarks
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_784 = random_ops.random_uniform((100, 784))
self._num_iters_2_by_2 = 30000
self._num_iters_100_by_784 = 1000
def _run(self, func, num_iters, execution_mode=None):
# call func to maybe warm up the GPU
ctx = context.context()
with ctx.execution_mode(execution_mode):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_create_np_array(self):
func = lambda: np.array([3.0])
self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device):
"""Benchmark overheads of creating a Tensor object."""
ctx = context.context()
handle = ctx._handle
if device == GPU:
# Warmup the GPU
ops.EagerTensor(value, context=handle, device=device)
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
self._run(func, 30000)
def benchmark_create_constant(self):
func = lambda: constant_op.constant(3.0)
self._run(func, 30000)
def benchmark_create_float_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
CPU)
def benchmark_create_int32_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_int32_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_list_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
def benchmark_create_float_tensor_from_np_array_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
GPU)
def benchmark_create_int32_tensor_from_list_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
def benchmark_create_int32_tensor_from_np_array_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
def _benchmark_np_multiply(self, m, num_iters):
a = m.cpu().numpy()
func = lambda: a * a
self._run(func, num_iters)
def _benchmark_tf_multiply(self, m, num_iters):
func = lambda: m * m
self._run(func, num_iters)
def _benchmark_tf_multiply_op(self, m, num_iters):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
def benchmark_tf_multiply_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_op_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_identity(self):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
def benchmark_tfe_py_execute_identity(self):
m = self._m_2
ctx_handle = context.context()._handle
attrs = ("T", self._m_2.dtype.as_datatype_enum)
inputs = [m]
def f():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
attrs, 1)
self._run(f, 30000)
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_tf_gradient_tape_push_pop(self):
def f():
with backprop.GradientTape():
pass
self._run(f, 30000)
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
b = a.T if transpose_b else a
func = lambda: np.dot(a, b)
self._run(func, num_iters)
def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
execution_mode=None):
func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
num_iters):
def func():
c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
def func():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
attrs, 1)
self._run(func, num_iters)
def _benchmark_defun_matmul(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
func = lambda: f(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_defun_matmul_forward_backward(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
def func():
with backprop.GradientTape() as gt:
gt.watch(m)
y = f(m, m, transpose_b=transpose_b)
_ = gt.gradient(y, m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_read_variable(self, m, num_iters):
self._run(m.value, num_iters)
def _benchmark_matmul_read_variable(self, m, num_iters):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._run(m.value, num_iters)
# Benchmarks for A^2, A of dimension 2 by 2.
def benchmark_np_matmul_2_by_2(self):
self._benchmark_np_matmul(
self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_tf_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
# Benchmarks for AA.T, A of dimension 100 by 784.
def benchmark_np_matmul_100_by_784(self):
self._benchmark_np_matmul(
self._m_100_by_784,
transpose_b=True,
num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_without_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(cache_computation, 30000)
def benchmark_defun_without_signature_and_with_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
def cache_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(cache_computation, 30000)
def benchmark_defun_with_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(signature_computation, 30000)
def benchmark_defun_with_signature_and_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
def signature_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(signature_computation, 30000)
def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_keras_model_subclassed(self):
model = SubclassedKerasModel()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# First call is more expensive (creates variables etc.), discount that.
func()
# The whole point of this test is to contrast subclassing with
# the functional style of keras model building, so validate that
# the models are equivalent.
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_functional(self):
model = make_keras_model()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_subclassed
func()
assert np.equal(func(), SubclassedKerasModel()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_sequential(self):
model = make_sequential_keras_model()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_functional
func()
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def _benchmark_keras_model_fit(self, model):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse")
func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_evaluate(self, model):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse")
func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.evaluate(dataset, steps=1, verbose=0)
self._run(func, 1)
def _benchmark_keras_model_predict(self, model):
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat()
model.compile(
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
loss="mse")
func = lambda: model.predict(dataset, steps=1000, verbose=0)
# First call is more expensive (creates variables etc.), discount that.
model.predict(dataset, steps=1, verbose=0)
self._run(func, 1)
def benchmark_keras_model_subclassed_fit(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_graph_mode(self):
with context.graph_mode():
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_fit_disable_defun(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_graph_mode(self):
with context.graph_mode():
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_functional_fit_disable_defun(self):
model = make_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_graph_mode(self):
with context.graph_mode():
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_sequential_fit_disable_defun(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_fit(model)
def benchmark_keras_model_subclassed_evaluate(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_subclassed_evaluate_disable_defun(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_functional_evaluate(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_functional_evaluate_disable_defun(self):
model = make_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_sequential_evaluate(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_sequential_evaluate_disable_defun(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_evaluate(model)
def benchmark_keras_model_subclassed_predict(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_subclassed_predict_disable_defun(self):
model = SubclassedKerasModel(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_functional_predict(self):
model = make_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_functional_predict_disable_defun(self):
model = make_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_sequential_predict(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
self._benchmark_keras_model_predict(model)
def benchmark_keras_model_sequential_predict_disable_defun(self):
model = make_sequential_keras_model(initializer="glorot_uniform")
model._can_use_graph_functions = False
self._benchmark_keras_model_predict(model)
def benchmarkScan(self):
elems = math_ops.range(1600)
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
def benchmarkScanDefun(self):
elems = math_ops.range(1600)
@function.defun
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
if __name__ == "__main__":
test.main()
| 35.542045 | 80 | 0.712185 | [
"Apache-2.0"
] | AishwaryaVarma/tensorflow | tensorflow/python/eager/benchmarks_test.py | 31,277 | Python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# =============================================================================
# title : magicblueshell.py
# description : Python tool to control Magic Blue bulbs over Bluetooth
# author : Benjamin Piouffle
# date : 23/11/2015
# usage : python magicblue.py
# python_version : 3.4
# =============================================================================
import argparse
import logging
import os
import sys
from sys import platform as _platform
import webcolors
from bluepy.btle import Scanner, DefaultDelegate
try:
from magicblue.magicbluelib import MagicBlue, Effect
from magicblue import __version__
except ImportError:
from magicbluelib import MagicBlue, Effect
from __init__ import __version__
logger = logging.getLogger(__name__)
class MagicBlueShell:
class Cmd:
def __init__(self, cmd_str, func, conn_required, help='', params=None,
aliases=None):
self.cmd_str = cmd_str
self.func = func
self.conn_required = conn_required
self.help = help
self.params = params or []
self.aliases = aliases or []
def __init__(self, bluetooth_adapter, bulb_version=7):
        # List available commands and their usage. 'conn_required' defines if
        # we need to be connected to a device for the command to run
self.available_cmds = [
MagicBlueShell.Cmd('help', self.list_commands, False,
help='Show this help'),
MagicBlueShell.Cmd('list_devices', self.cmd_list_devices, False,
help='List Bluetooth LE devices in range',
aliases=['ls']),
MagicBlueShell.Cmd('list_effects', self.cmd_list_effects, False,
help='List available effects',),
MagicBlueShell.Cmd('connect', self.cmd_connect, False,
help='Connect to light bulb',
params=['mac_address or ID']),
MagicBlueShell.Cmd('disconnect', self.cmd_disconnect, True,
help='Disconnect from current light bulb'),
MagicBlueShell.Cmd('set_color', self.cmd_set_color, True,
help="Change bulb's color",
params=['name or hexadecimal value']),
MagicBlueShell.Cmd('set_warm_light', self.cmd_set_warm_light, True,
help='Set warm light',
params=['intensity[0.0-1.0]']),
MagicBlueShell.Cmd('set_effect', self.cmd_set_effect, True,
help='Set an effect',
params=['effect_name', 'speed[1-20]']),
MagicBlueShell.Cmd('turn', self.cmd_turn, True,
help='Turn on / off the bulb',
params=['on|off']),
MagicBlueShell.Cmd('read', self.cmd_read, True,
help='Read device_info/datetime from the bulb',
params=['name|device_info|date_time']),
MagicBlueShell.Cmd('exit', self.cmd_exit, False,
help='Exit the script')
]
self.bluetooth_adapter = bluetooth_adapter
self._bulb_version = bulb_version
self._magic_blue = None
self._devices = []
self.last_scan = None
def start_interactive_mode(self):
print('Magic Blue interactive shell v{}'.format(__version__))
print('Type "help" for a list of available commands')
str_cmd = ''
while str_cmd != 'exit':
try:
str_cmd = input('> ').strip()
if str_cmd:
self.exec_cmd(str_cmd)
except (EOFError, KeyboardInterrupt): # Catch Ctrl+D / Ctrl+C
self.cmd_exit()
return
except Exception as e:
logger.error('Unexpected error with command "{}": {}'
.format(str_cmd, str(e)))
def exec_cmd(self, str_cmd):
cmd = self._get_command(str_cmd)
if cmd is not None:
if cmd.conn_required and not (self._magic_blue and
self._magic_blue.is_connected()):
logger.error('You must be connected to run this command')
elif self._check_args(str_cmd, cmd):
cmd.func(str_cmd.split()[1:])
else:
logger.error('"{}" is not a valid command.'
'Type "help" to see what you can do'
.format(str_cmd.split()[0]))
def print_usage(self, str_cmd):
cmd = self._get_command(str_cmd)
if cmd is not None:
print('Usage: {} {}'.format(cmd.cmd_str, ' '.join(cmd.params)))
else:
logger.error('Unknown command {}'.format(str_cmd))
return False
def cmd_list_devices(self, *args):
scan_time = 300
try:
self.last_scan = ScanDelegate()
scanner = Scanner().withDelegate(self.last_scan)
print('Listing Bluetooth LE devices in range for {} seconds. '
'Press CTRL+C to abort searching.'.format(scan_time))
print('{: <5} {: <30} {: <12}'.format('ID', 'Name', 'Mac address'))
print('{: <5} {: <30} {: <12}'.format('--', '----', '-----------'))
scanner.scan(scan_time)
except KeyboardInterrupt:
print('\n')
except RuntimeError as e:
logger.error('Problem with the Bluetooth adapter : {}'.format(e))
return False
def cmd_list_effects(self, *args):
for e in Effect.__members__.keys():
print(e)
def cmd_connect(self, *args):
        # User can enter either a MAC address or the device ID from the list
if len(args[0][0]) < 4 and self.last_scan:
try:
dev_id = int(args[0][0]) - 1
entry = self.last_scan.devices[dev_id]
mac_address = entry.addr
addr_type = entry.addrType
except Exception:
logger.error('Bad ID / MAC address : {}'.format(args[0][0]))
return False
else:
addr_type = None
mac_address = args[0][0]
self._magic_blue = MagicBlue(mac_address,
version=self._bulb_version,
addr_type=addr_type)
self._magic_blue.connect(self.bluetooth_adapter)
logger.info('Connected')
def cmd_disconnect(self, *args):
self._magic_blue.disconnect()
self._magic_blue = None
def cmd_turn(self, *args):
if args[0][0] == 'on':
self._magic_blue.turn_on()
else:
self._magic_blue.turn_off()
def cmd_read(self, *args):
if args[0][0] == 'name':
name = self._magic_blue.get_device_name()
logger.info('Received name: {}'.format(name))
elif args[0][0] == 'device_info':
device_info = self._magic_blue.get_device_info()
logger.info('Received device_info: {}'.format(device_info))
elif args[0][0] == 'date_time':
datetime_ = self._magic_blue.get_date_time()
logger.info('Received datetime: {}'.format(datetime_))
def cmd_set_color(self, *args):
color = args[0][0]
try:
if color.startswith('#'):
self._magic_blue.set_color(webcolors.hex_to_rgb(color))
else:
self._magic_blue.set_color(webcolors.name_to_rgb(color))
except ValueError as e:
logger.error('Invalid color value : {}'.format(str(e)))
self.print_usage('set_color')
def cmd_set_warm_light(self, *args):
try:
self._magic_blue.set_warm_light(float(args[0][0]))
except ValueError as e:
logger.error('Invalid intensity value : {}'.format(str(e)))
self.print_usage('set_color')
def cmd_set_effect(self, *args):
try:
[effect, speed] = args[0]
effect = Effect[effect]
speed = int(speed)
except KeyError as key:
logger.error('Unknown effect {}'.format(key))
except ValueError:
self.print_usage('set_effect')
else:
self._magic_blue.set_effect(effect, speed)
def list_commands(self, *args):
print(' ----------------------------')
print('| List of available commands |')
print(' ----------------------------')
print('{: <16}{: <30}{}'.format('COMMAND', 'PARAMETERS', 'DETAILS'))
print('{: <16}{: <30}{}'.format('-------', '----------', '-------'))
for command in self.available_cmds:
print('{: <16}{: <30}{}'.format(
command.cmd_str, ' '.join(command.params), command.help))
for alias in command.aliases:
print('{: <16}{: <30}{}'.format(alias, '//', '//'))
def cmd_exit(self, *args):
print('Bye !')
def _check_args(self, str_cmd, cmd):
expected_nb_args = len(cmd.params)
args = str_cmd.split()[1:]
if len(args) != expected_nb_args:
self.print_usage(str_cmd.split()[0])
return False
return True
def _get_command(self, str_cmd):
str_cmd = str_cmd.split()[0]
return next((item for item in self.available_cmds
if item.cmd_str == str_cmd or str_cmd in item.aliases
), None)
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
self.devices = []
def handleDiscovery(self, dev, is_new_device, is_new_data):
if is_new_device:
self.devices.append(dev)
raw_name = dev.getValueText(9)
dev_name = raw_name.split('\x00')[0] if raw_name else "NO_NAME"
print('{: <5} {: <30} {: <12}'.format(len(self.devices),
dev_name,
dev.addr))
def get_params():
    parser = argparse.ArgumentParser(description='Python tool to control Magic '
'Blue bulbs over Bluetooth')
parser.add_argument('-l', '--list_commands',
dest='list_commands',
help='List available commands',
action='store_true')
parser.add_argument('-c', '--command',
dest='command',
help='Command to execute')
parser.add_argument('-m', '--mac_address',
dest='mac_address',
help='Device mac address. Must be set if command given'
' in -c needs you to be connected')
parser.add_argument('-a', '--bluetooth_adapter',
default='hci0',
dest='bluetooth_adapter',
help='Bluetooth adapter name as listed by hciconfig')
parser.add_argument('-b', '--bulb-version',
default='7',
dest='bulb_version',
type=int,
help='Bulb version as displayed in the official app')
return parser.parse_args()
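# Illustrative invocations (MAC address is hypothetical), based on the flags
# defined above; main() below requires root on Linux:
#
#     sudo python magicblueshell.py                  # interactive shell
#     sudo python magicblueshell.py -l               # list available commands
#     sudo python magicblueshell.py -b 9 -m C7:17:1D:43:39:03 -c 'set_color red'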
def main():
params = get_params()
# Exit if not root
if (_platform == "linux" or _platform == "linux2") and os.geteuid() != 0:
logger.error("Script must be run as root")
return 1
shell = MagicBlueShell(params.bluetooth_adapter, params.bulb_version)
if params.list_commands:
shell.list_commands()
elif params.command:
logging.basicConfig(level=logging.WARNING)
if params.mac_address:
shell.cmd_connect([params.mac_address])
shell.exec_cmd(params.command)
else:
logging.basicConfig(level=logging.INFO)
shell.start_interactive_mode()
return 0
if __name__ == '__main__':
sys.exit(main())
| 39.785714 | 79 | 0.527664 | [
"MIT"
] | mouth4war/magicblue | magicblue/magicblueshell.py | 12,254 | Python |
from typing import Optional
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from python_todopago.helpers import Authorization
from python_todopago.helpers import OperationStatus
from payments import PaymentError
from payments import PaymentStatus
from payments import PurchasedItem
from payments.todopago import TodoPagoProvider
class Payment(Mock):
id = 1
description = "payment"
currency = "ARS"
status = PaymentStatus.WAITING
message = None
total = 100
transaction_id: Optional[str] = None
billing_first_name = "John"
billing_last_name = "Doe"
billing_address_1 = "Some Address"
billing_address_2 = "Some Address"
billing_postcode = "12345"
billing_city = "Some City"
billing_country_code = "AR"
billing_country_area = "Capital Federal"
billing_phone = "+543513840247"
customer_ip_address = "192.168.0.1"
billing_email = "[email protected]"
def change_status(self, status, message=""):
self.status = status
self.message = message
def change_fraud_status(self, status, message="", commit=True):
self.fraud_status = status
self.fraud_message = message
def get_success_url(self):
return "http://example.com/success"
def get_failure_url(self):
return "http://example.com/failure"
def get_process_url(self):
return "http://example.com/process"
def get_purchased_items(self):
yield PurchasedItem(
name="Some Product",
sku="SP12",
quantity=1,
price=100,
currency="ARS",
)
@pytest.fixture
def tp_provider():
return TodoPagoProvider(
"PRISMA f3d8b72c94ab4a06be2ef7c95490f7d3", 2153, sandbox=True
)
def test_authorize_operation(tp_provider):
payment = Payment()
authorization = Authorization(
status_code=-1,
status_message="Solicitud de Autorizacion Registrada",
form_url="https://forms.todopago.com.ar/formulario/commands?command=formulario&m=a6104bad3-1be7-4e8e-932e-e927100b2e86&fr=1",
request_key="f5ad41bc-92ba-40ff-889d-8a23fe562a28",
public_request_key="a6104bad3-1be7-4e8e-932e-e927100b2e86",
)
with patch(
"python_todopago.TodoPagoConnector.authorize_operation",
spec=True,
return_value=authorization,
):
tp_provider.authorize_operation(payment)
assert payment.status == PaymentStatus.WAITING
assert payment.attrs.request_key == "f5ad41bc-92ba-40ff-889d-8a23fe562a28"
def test_approved_payment_notification(rf, tp_provider):
payment = Payment()
payment.attrs.request_key = "1fb7cc9a-14dd-42ec-bf1e-6d5820799642"
payment.attrs.form_url = (
"https://forms.todopago.com.ar/formulario/commands?command=formulario&m=a6104bad3-1be7-4e8e-932e-e927100b2e86&fr=1",
)
payment.save()
request = rf.get(
"/payments/process/d16695e8-b76d-4438-bd10-634545ecb1d6/",
{"Answer": "44caba31-1373-4544-aa6b-42abff696944"},
)
operation_status = OperationStatus(
status_code=-1,
status_message="APROBADA",
authorization_key="817824df-8614-4ce8-a6c9-abdf884024ab",
)
with patch(
"python_todopago.TodoPagoConnector.get_operation_status",
spec=True,
return_value=operation_status,
), patch("payments.todopago.redirect", spec=True) as redirect:
rv = tp_provider.process_callback(payment, request)
assert rv == redirect(payment.get_success_url())
def test_rejected_payment_notification(rf, tp_provider):
payment = Payment()
payment.attrs.request_key = "1fb7cc9a-14dd-42ec-bf1e-6d5820799642"
payment.attrs.form_url = (
"https://forms.todopago.com.ar/formulario/commands?command=formulario&m=a6104bad3-1be7-4e8e-932e-e927100b2e86&fr=1",
)
payment.save()
request = rf.get(
"/payments/process/d16695e8-b76d-4438-bd10-634545ecb1d6/",
{"Answer": "44caba31-1373-4544-aa6b-42abff696944"},
)
operation_status = OperationStatus(
status_code=99998,
status_message="-",
authorization_key="-",
)
with patch(
"python_todopago.TodoPagoConnector.get_operation_status",
spec=True,
return_value=operation_status,
), pytest.raises(PaymentError, match="didn't approve the payment"):
_ = tp_provider.process_callback(payment, request)
| 30.765517 | 141 | 0.691549 | [
"BSD-3-Clause"
] | Natureshadow/django-payments | payments/todopago/test_todopago.py | 4,461 | Python |
from pyopenproject.business.root_service import RootService
from pyopenproject.business.services.command.root.find import Find
class RootServiceImpl(RootService):
def __init__(self, connection):
"""Constructor for class RootServiceImpl, from RootService
:param connection: The connection data
"""
super().__init__(connection)
def find(self):
return Find(self.connection).execute()
| 27.125 | 66 | 0.723502 | [
"MIT"
] | Flying-Free/pyopenproject | pyopenproject/business/services/root_service_impl.py | 434 | Python |
import os
import shutil
from datetime import timedelta
from django.contrib.admin.sites import AdminSite
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.utils import timezone
from allauth.account.models import EmailAddress
from rest_framework.test import APITestCase, APIClient
from challenges.models import Challenge, ChallengePhase
from hosts.models import ChallengeHostTeam
from jobs.models import Submission
from jobs.admin import SubmissionAdmin
from participants.models import ParticipantTeam, Participant
class BaseAPITestClass(APITestCase):
def setUp(self):
self.client = APIClient(enforce_csrf_checks=True)
self.user = User.objects.create(
username="someuser",
email="[email protected]",
password="secret_password",
)
EmailAddress.objects.create(
user=self.user, email="[email protected]", primary=True, verified=True
)
self.user1 = User.objects.create(
username="someuser1",
email="[email protected]",
password="secret_password1",
)
EmailAddress.objects.create(
user=self.user1,
email="[email protected]",
primary=True,
verified=True,
)
self.challenge_host_team = ChallengeHostTeam.objects.create(
team_name="Test Challenge Host Team", created_by=self.user
)
self.participant_team = ParticipantTeam.objects.create(
team_name="Participant Team for Challenge", created_by=self.user1
)
self.participant = Participant.objects.create(
user=self.user1,
status=Participant.SELF,
team=self.participant_team,
)
self.challenge = Challenge.objects.create(
title="Test Challenge",
description="Description for test challenge",
terms_and_conditions="Terms and conditions for test challenge",
submission_guidelines="Submission guidelines for test challenge",
creator=self.challenge_host_team,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
published=False,
enable_forum=True,
anonymous_leaderboard=False,
)
try:
os.makedirs("/tmp/evalai")
except OSError:
pass
with self.settings(MEDIA_ROOT="/tmp/evalai"):
self.challenge_phase = ChallengePhase.objects.create(
name="Challenge Phase",
description="Description for Challenge Phase",
leaderboard_public=False,
is_public=False,
start_date=timezone.now() - timedelta(days=2),
end_date=timezone.now() + timedelta(days=1),
challenge=self.challenge,
test_annotation=SimpleUploadedFile(
"test_sample_file.txt",
b"Dummy file content",
content_type="text/plain",
),
)
self.submission = Submission.objects.create(
participant_team=self.participant_team,
challenge_phase=self.challenge_phase,
created_by=self.challenge_host_team.created_by,
status="submitted",
input_file=self.challenge_phase.test_annotation,
method_name="Test Method",
method_description="Test Description",
project_url="http://testserver/",
publication_url="http://testserver/",
is_public=True,
)
self.client.force_authenticate(user=self.user)
def tearDown(self):
shutil.rmtree("/tmp/evalai")
class MockRequest(object):
pass
request = MockRequest()
class SubmissionAdminTest(BaseAPITestClass):
"""
Test case for re-running submissions from admin
"""
def setUp(self):
super(SubmissionAdminTest, self).setUp()
self.app_admin = SubmissionAdmin(Submission, AdminSite())
def test_submit_job_to_worker(self):
Submission.objects.filter(status=self.submission.status).update(
status="finished"
)
queryset = Submission.objects.filter(status="finished")
self.app_admin.submit_job_to_worker(request, queryset)
self.assertEqual(
Submission.objects.filter(status="submitted").count(), 1
)
def test_make_submission_public(self):
# make all submissions private before test
Submission.objects.filter(is_public=self.submission.is_public).update(
is_public=False
)
queryset = Submission.objects.filter(is_public=False)
self.app_admin.make_submission_public(request, queryset)
self.assertEqual(Submission.objects.filter(is_public=True).count(), 1)
def test_make_submission_private(self):
# make all submissions public before test
Submission.objects.filter(is_public=False).update(
is_public=True
)
queryset = Submission.objects.filter(is_public=True)
self.app_admin.make_submission_private(request, queryset)
self.assertEqual(Submission.objects.filter(is_public=False).count(), 1)
| 33.043478 | 79 | 0.640789 | [
"BSD-3-Clause"
] | Mukul2000/EvalAI | tests/unit/jobs/test_admin.py | 5,320 | Python |
import geohash
import redis
from addok.config import config
from addok.db import DB
from addok.ds import get_document
from . import iter_pipe, keys, yielder
VALUE_SEPARATOR = '|~|'
def preprocess(s):
if s not in _CACHE:
_CACHE[s] = list(iter_pipe(s, config.PROCESSORS))
return _CACHE[s]
_CACHE = {}
def token_key_frequency(key):
return DB.zcard(key)
def token_frequency(token):
return token_key_frequency(keys.token_key(token))
def extract_tokens(tokens, string, boost):
els = list(preprocess(string))
if not els:
return
boost = config.DEFAULT_BOOST / len(els) * boost
for token in els:
if tokens.get(token, 0) < boost:
tokens[token] = boost
def index_tokens(pipe, tokens, key, **kwargs):
for token, boost in tokens.items():
pipe.zadd(keys.token_key(token), mapping={key: boost})
def deindex_field(key, string):
els = list(preprocess(string))
for s in els:
deindex_token(key, s)
return els
def deindex_token(key, token):
tkey = keys.token_key(token)
DB.zrem(tkey, key)
def index_documents(docs):
pipe = DB.pipeline(transaction=False)
for doc in docs:
if not doc:
continue
if doc.get('_action') in ['delete', 'update']:
key = keys.document_key(doc['_id']).encode()
known_doc = get_document(key)
if known_doc:
deindex_document(known_doc)
if doc.get('_action') in ['index', 'update', None]:
index_document(pipe, doc)
yield doc
try:
pipe.execute()
except redis.RedisError as e:
msg = 'Error while importing document:\n{}\n{}'.format(doc, str(e))
raise ValueError(msg)
def index_document(pipe, doc, **kwargs):
key = keys.document_key(doc['_id'])
tokens = {}
for indexer in config.INDEXERS:
try:
indexer.index(pipe, key, doc, tokens, **kwargs)
except ValueError as e:
print(e)
return # Do not index.
def deindex_document(doc, **kwargs):
key = keys.document_key(doc['_id'])
tokens = []
for indexer in config.INDEXERS:
indexer.deindex(DB, key, doc, tokens, **kwargs)
def index_geohash(pipe, key, lat, lon):
lat = float(lat)
lon = float(lon)
geoh = geohash.encode(lat, lon, config.GEOHASH_PRECISION)
geok = keys.geohash_key(geoh)
pipe.sadd(geok, key)
def deindex_geohash(key, lat, lon):
lat = float(lat)
lon = float(lon)
geoh = geohash.encode(lat, lon, config.GEOHASH_PRECISION)
geok = keys.geohash_key(geoh)
DB.srem(geok, key)
class FieldsIndexer:
@staticmethod
def index(pipe, key, doc, tokens, **kwargs):
importance = (float(doc.get('importance', 0.0))
* config.IMPORTANCE_WEIGHT)
for field in config.FIELDS:
name = field['key']
values = doc.get(name)
if not values:
if not field.get('null', True):
# A mandatory field is null.
raise ValueError('{} must not be null'.format(name))
continue
if name != config.HOUSENUMBERS_FIELD:
boost = field.get('boost', config.DEFAULT_BOOST)
if callable(boost):
boost = boost(doc)
boost = boost + importance
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
extract_tokens(tokens, str(value), boost=boost)
index_tokens(pipe, tokens, key, **kwargs)
@staticmethod
def deindex(db, key, doc, tokens, **kwargs):
for field in config.FIELDS:
name = field['key']
if name == config.HOUSENUMBERS_FIELD:
continue
values = doc.get(name)
if values:
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
tokens.extend(deindex_field(key, value))
class GeohashIndexer:
@staticmethod
def index(pipe, key, doc, tokens, **kwargs):
index_geohash(pipe, key, doc['lat'], doc['lon'])
@staticmethod
def deindex(db, key, doc, tokens, **kwargs):
deindex_geohash(key, doc['lat'], doc['lon'])
class HousenumbersIndexer:
@staticmethod
def index(pipe, key, doc, tokens, **kwargs):
housenumbers = doc.get('housenumbers', {})
for number, data in housenumbers.items():
index_geohash(pipe, key, data['lat'], data['lon'])
@staticmethod
def deindex(db, key, doc, tokens, **kwargs):
housenumbers = doc.get('housenumbers', {})
for token, data in housenumbers.items():
deindex_geohash(key, data['lat'], data['lon'])
class FiltersIndexer:
@staticmethod
def index(pipe, key, doc, tokens, **kwargs):
for name in config.FILTERS:
values = doc.get(name)
if values:
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
pipe.sadd(keys.filter_key(name, value), key)
# Special case for housenumber type, because it's not a real type
if "type" in config.FILTERS and config.HOUSENUMBERS_FIELD \
and doc.get(config.HOUSENUMBERS_FIELD):
pipe.sadd(keys.filter_key("type", "housenumber"), key)
@staticmethod
def deindex(db, key, doc, tokens, **kwargs):
for name in config.FILTERS:
values = doc.get(name)
if values:
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
db.srem(keys.filter_key(name, value), key)
if "type" in config.FILTERS:
db.srem(keys.filter_key("type", "housenumber"), key)
@yielder
def prepare_housenumbers(doc):
# We need to have the housenumbers tokenized in the document, to match
# from user query (see results.match_housenumber).
if not doc:
return
housenumbers = doc.get(config.HOUSENUMBERS_FIELD)
if housenumbers:
doc['housenumbers'] = {}
for number, data in housenumbers.items():
# Housenumber may have multiple tokens (eg.: "dix huit").
token = ''.join(list(preprocess(number)))
data['raw'] = number
doc['housenumbers'][token] = data
return doc
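# Shape of a document as consumed by index_documents() (only keys touched in
# this module are shown; the searchable fields are whatever config.FIELDS
# declares, so the "name" entry below is purely illustrative, and the
# housenumbers key name actually comes from config.HOUSENUMBERS_FIELD):
#
#   {
#       "_id": "addr-123",
#       "_action": "index",          # or "update" / "delete"; None means index
#       "lat": 48.85, "lon": 2.35,   # consumed by GeohashIndexer
#       "importance": 0.4,           # folded into each token's boost
#       "name": "rue de l'exemple",  # example config.FIELDS field
#       "housenumbers": {"18": {"lat": 48.85, "lon": 2.35}},
#   }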
| 29.986239 | 75 | 0.583601 | [
"MIT"
] | addok/addok | addok/helpers/index.py | 6,537 | Python |
import pandas as pd
import numpy as np
from pandas.util.testing import rands
groups = np.arange(10)
str_groups = np.array(list("0123456789"))
np.random.seed(1)
for size in [1e2, 1e3, 1e4, 1e5, 1e6]:
size = int(size)
g = np.random.choice(groups, size)
sg = np.random.choice(str_groups, size)
v = np.random.randn(size)
df = pd.DataFrame({"groups": g, "values": v, "str": sg})
df.to_csv(f"../data/{size}.csv", index=False)
print("data created")
# Join benchmark data
# https://wesmckinney.com/blog/high-performance-database-joins-with-pandas-dataframe-more-benchmarks/
# https://github.com/wesm/pandas/blob/23669822819808bbaeb6ea36a6b2ef98026884db/bench/bench_merge_sqlite.py
N = 10000
indices = np.array([rands(10) for _ in range(N)], dtype="O")
indices2 = np.array([rands(10) for _ in range(N)], dtype="O")
key = np.tile(indices[:8000], 10)
key2 = np.tile(indices2[:8000], 10)
left = pd.DataFrame({"key": key, "key2": key2, "value": np.random.randn(80000)})
right = pd.DataFrame(
{"key": indices[2000:], "key2": indices2[2000:], "value2": np.random.randn(8000)}
)
left.to_csv("../data/join_left_80000.csv", index=False)
right.to_csv("../data/join_right_80000.csv", index=False)
| 33.666667 | 106 | 0.693894 | [
"MIT"
] | koaning/polars | pandas_cmp/create_data.py | 1,212 | Python |
from jsonobject import *
class ReconstructableJsonObject(JsonObject):
@classmethod
def from_json(cls, data):
return cls._from_json(cls, data)
@classmethod
def _from_json(cls, root: JsonObject.__class__, data):
if root is None:
return data
for key, type in root._properties_by_attr.items():
if isinstance(type, (ListProperty,)) and key in data:
data[key] = [cls._from_json(getattr(type.item_wrapper, 'item_type', None), item) for item in
data[key]]
elif isinstance(type, (DictProperty,)) and key in data:
data[key] = {in_key: cls._from_json(getattr(type.item_wrapper, 'item_type', None), value) for
in_key, value
in
data[key].items()}
elif isinstance(type, (ObjectProperty,)) and key in data:
data[key] = cls._from_json(type.item_type, data[key])
if 'self' in data:
data['_self'] = data['self']
del data['self']
return root(**data)
class Links(ReconstructableJsonObject):
_self = StringProperty(name='self')
first = StringProperty(exclude_if_none=True)
related = StringProperty(exclude_if_none=True)
class Relationship(ReconstructableJsonObject):
links = ObjectProperty(Links)
class DataNode(ReconstructableJsonObject):
type = StringProperty()
id = StringProperty(exclude_if_none=True)
attributes = DictProperty()
links = ObjectProperty(Links, exclude_if_none=True)
relationships = DictProperty(Relationship, exclude_if_none=True)
class Meta(ReconstructableJsonObject):
page_count = IntegerProperty(name='page-count')
resource_count = IntegerProperty(name='resource-count')
class RootListDataNode(ReconstructableJsonObject):
data = ListProperty(DataNode)
links = ObjectProperty(Links)
meta = ObjectProperty(Meta)
class RootDataNode(ReconstructableJsonObject):
data = ObjectProperty(DataNode)
class PhoneNumber(ReconstructableJsonObject):
country = StringProperty(default='US')
number = StringProperty()
sms = BooleanProperty(default=False)
class Address(ReconstructableJsonObject):
street_1 = StringProperty(name='street-1')
street_2 = StringProperty(name='street-2')
postal_code = StringProperty(name='postal-code')
city = StringProperty()
region = StringProperty()
country = StringProperty()
KYC_DOCUMENT_TYPES = ["drivers_license", "government_id", "other", "passport", "residence_permit", "utility_bill"]
class KYCDocument(ReconstructableJsonObject):
contact_id = StringProperty(name='contact-id')
uploaded_document_id = StringProperty(name='uploaded-document-id')
backside_document_id = StringProperty(name='backside-document-id', exclude_if_none=True)
expires_on = StringProperty(name='expires-on', exclude_if_none=True)
identity = BooleanProperty(name='identity', exclude_if_none=True)
identity_photo = BooleanProperty(name='identity-photo', exclude_if_none=True)
proof_of_address = BooleanProperty(name='proof-of-address', exclude_if_none=True)
kyc_document_type = StringProperty(name='kyc-document-type',
choices=KYC_DOCUMENT_TYPES, default='drivers_license')
kyc_document_country = StringProperty(name='kyc-document-country', default='US')
class WebhookConfig(ReconstructableJsonObject):
account_id = StringProperty(name='account-id')
url = StringProperty(name='url')
shared_secret = StringProperty(name='shared-secret', exclude_if_none=True)
enabled = BooleanProperty(exclude_if_none=True)
contact_email = StringProperty(name='contact-email', exclude_if_none=True)
class Contact(ReconstructableJsonObject):
contact_type = StringProperty(name='contact-type', choices=['natural_person', 'company'], default='natural_person')
name = StringProperty(exclude_if_none=True)
email = StringProperty()
date_of_birth = StringProperty(name='date-of-birth', exclude_if_none=True)
sex = StringProperty(choices=['male', 'female', 'other'], exclude_if_none=True)
tax_id_number = StringProperty(name='tax-id-number', exclude_if_none=True)
tax_country = StringProperty(name='tax-country')
label = StringProperty(exclude_if_none=True)
primary_phone_number = ObjectProperty(PhoneNumber, name='primary-phone-number')
primary_address = ObjectProperty(Address, name='primary-address')
region_of_formation = StringProperty(name='region-of-formation', exclude_if_none=True)
related_contacts = ListProperty(ObjectProperty, name='related-contacts', exclude_if_none=True)
account_roles = ListProperty(StringProperty, name='account-roles')
Contact.related_contacts.item_wrapper._item_type = Contact
class FundTransferMethod(ReconstructableJsonObject):
bank_account_name = StringProperty(name='bank-account-name')
routing_number = StringProperty(name='routing-number', exclude_if_none=True)
ip_address = StringProperty(name='ip-address')
bank_account_type = StringProperty(name='bank-account-type', exclude_if_none=True)
bank_account_number = StringProperty(name='bank-account-number', exclude_if_none=True)
ach_check_type = StringProperty(name='ach-check-type')
funds_transfer_type = StringProperty(name='funds-transfer-type')
plaid_public_token = StringProperty(name='plaid-public-token', exclude_if_none=True)
plaid_account_id = StringProperty(name='plaid-account-id', exclude_if_none=True)
class AccountQuestionnaire(ReconstructableJsonObject):
nature_of_business_of_the_company = StringProperty(name='nature-of-business-of-the-company')
purpose_of_account = StringProperty(name='purpose-of-account')
source_of_assets_and_income = StringProperty(name='source-of-assets-and-income')
intended_use_of_account = StringProperty(name='intended-use-of-account')
anticipated_monthly_cash_volume = StringProperty(name='anticipated-monthly-cash-volume')
anticipated_monthly_transactions_incoming = StringProperty(name='anticipated-monthly-transactions-incoming')
anticipated_monthly_transactions_outgoing = StringProperty(name='anticipated-monthly-transactions-outgoing')
anticipated_types_of_assets = StringProperty(name='anticipated-types-of-assets')
anticipated_trading_patterns = StringProperty(name='anticipated-trading-patterns')
associations_with_other_accounts = StringProperty(name='associations-with-other-accounts')
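if __name__ == '__main__':
    # Small round-trip demonstration of from_json() on a JSON:API-style payload.
    # All payload values below are made-up examples.
    payload = {
        'data': {
            'type': 'accounts',
            'id': 'abc-123',
            'attributes': {'name': 'Sandbox Account'},
            'links': {'self': '/v2/accounts/abc-123'},
        }
    }
    root = RootDataNode.from_json(payload)
    print(root.data.type, root.data.id)
    print(root.data.links._self)  # 'self' is remapped to '_self' by from_json()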
| 44.544218 | 119 | 0.738241 | [
"MIT"
] | amitassaraf/py-prime-trust | src/primetrust/models.py | 6,548 | Python |
# Config
NODE_ID = ${NODE_ID}
# hour,set 0 to disable
SPEEDTEST = ${SPEEDTEST}
CLOUDSAFE = ${CLOUDSAFE}
ANTISSATTACK = ${ANTISSATTACK}
AUTOEXEC = ${AUTOEXEC}
MU_SUFFIX = "${MU_SUFFIX}"
MU_REGEX = "${MU_REGEX}"
SERVER_PUB_ADDR = "127.0.0.1" # mujson_mgr need this to generate ssr link
API_INTERFACE = "${API_INTERFACE}" # glzjinmod, modwebapi
WEBAPI_URL = "${WEBAPI_URL}"
WEBAPI_TOKEN = "${WEBAPI_TOKEN}"
# mudb
MUDB_FILE = 'mudb.json'
# Mysql
MYSQL_HOST = "${MYSQL_HOST}"
MYSQL_PORT = ${MYSQL_PORT}
MYSQL_USER = "${MYSQL_USER}"
MYSQL_PASS = "${MYSQL_PASS}"
MYSQL_DB = "${MYSQL_DB}"
MYSQL_SSL_ENABLE = 0
MYSQL_SSL_CA = ''
MYSQL_SSL_CERT = ''
MYSQL_SSL_KEY = ''
# API
API_HOST = '127.0.0.1'
API_PORT = 80
API_PATH = '/mu/v2/'
API_TOKEN = 'abcdef'
API_UPDATE_TIME = 60
# Manager (ignore this)
MANAGE_PASS = 'ss233333333'
# if you want manage in other server you should set this value to global ip
MANAGE_BIND_IP = '127.0.0.1'
# make sure this port is idle
MANAGE_PORT = 23333
| 20.93617 | 75 | 0.707317 | [
"Apache-2.0"
] | topjohncian/cian-ssrmu | apiconfig.py | 984 | Python |
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from .result import ExperimentResult
from .result import CVExperimentResult
from ..metrics.rating import RatingMetric
from ..metrics.ranking import RankingMetric
from ..models.recommender import Recommender
class Experiment:
""" Experiment Class
Parameters
----------
eval_method: :obj:`<cornac.eval_methods.BaseMethod>`, required
The evaluation method (e.g., RatioSplit).
models: array of :obj:`<cornac.models.Recommender>`, required
A collection of recommender models to evaluate, e.g., [C2PF, HPF, PMF].
metrics: array of :obj:{`<cornac.metrics.RatingMetric>`, `<cornac.metrics.RankingMetric>`}, required
A collection of metrics to use to evaluate the recommender models, \
e.g., [NDCG, MRR, Recall].
user_based: bool, optional, default: True
This parameter is only useful if you are considering rating metrics. When True, first the average performance \
for every user is computed, then the obtained values are averaged to return the final result.
If `False`, results will be averaged over the number of ratings.
result: array of :obj:`<cornac.experiment.result.Result>`, default: None
This attribute contains the results per-model of your experiment, initially it is set to None.
"""
def __init__(self, eval_method, models, metrics, user_based=True, verbose=False):
self.eval_method = eval_method
self.models = self._validate_models(models)
self.metrics = self._validate_metrics(metrics)
self.user_based = user_based
self.verbose = verbose
self.result = None
@staticmethod
def _validate_models(input_models):
if not hasattr(input_models, "__len__"):
            raise ValueError('models have to be an array but got {}'.format(type(input_models)))
valid_models = []
for model in input_models:
if isinstance(model, Recommender):
valid_models.append(model)
return valid_models
@staticmethod
def _validate_metrics(input_metrics):
if not hasattr(input_metrics, "__len__"):
            raise ValueError('metrics have to be an array but got {}'.format(type(input_metrics)))
valid_metrics = []
for metric in input_metrics:
if isinstance(metric, RatingMetric) or isinstance(metric, RankingMetric):
valid_metrics.append(metric)
return valid_metrics
def _create_result(self):
from ..eval_methods.cross_validation import CrossValidation
if isinstance(self.eval_method, CrossValidation):
self.result = CVExperimentResult()
else:
self.result = ExperimentResult()
def run(self):
self._create_result()
for model in self.models:
model_result = self.eval_method.evaluate(model=model,
metrics=self.metrics,
user_based=self.user_based)
self.result.append(model_result)
print('\n{}'.format(self.result))
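# Usage sketch -- RatioSplit, PMF, NDCG and Recall are the classes named in the
# docstring above; the import paths and constructor arguments shown here are
# assumptions and may differ between package versions:
#
#     from cornac.eval_methods import RatioSplit
#     from cornac.models import PMF
#     from cornac.metrics import NDCG, Recall
#
#     data = [('u1', 'i1', 4.0), ('u1', 'i2', 1.0), ('u2', 'i1', 3.0)]
#     Experiment(eval_method=RatioSplit(data=data, test_size=0.2),
#                models=[PMF(k=10)],
#                metrics=[NDCG(k=10), Recall(k=20)],
#                user_based=True).run()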
| 41.043478 | 119 | 0.658633 | [
"Apache-2.0"
] | linksboy/cornac | cornac/experiment/experiment.py | 3,776 | Python |
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_id_annotation_request import TextIdAnnotationRequest # noqa: E501
from openapi_server.models.text_id_annotation_response import TextIdAnnotationResponse # noqa: E501
def create_text_id_annotations(text_id_annotation_request=None): # noqa: E501
"""Annotate IDs in a clinical note
Return the ID annotations found in a clinical note # noqa: E501
:param text_id_annotation_request:
:type text_id_annotation_request: dict | bytes
:rtype: TextIdAnnotationResponse
"""
    if connexion.request.is_json:
        try:
            annotation_request = TextIdAnnotationRequest.from_dict(connexion.request.get_json())  # noqa: E501
            note = annotation_request.note
            annotations = get_annotations(note, phi_type=PhiType.ID)
            res = TextIdAnnotationResponse(annotations)
            status = 200
        except Exception as error:
            status = 500
            res = Error("Internal error", status, str(error))
    else:
        status = 400
        res = Error("Bad request", status, "Request body must be JSON")
    return res, status
| 39.16129 | 110 | 0.737232 | [
"Apache-2.0"
] | cascadianblue/phi-annotator | server/openapi_server/controllers/text_id_annotation_controller.py | 1,214 | Python |
#!/usr/bin/env python3
self_description = """
gridradar2influx is a tiny daemon that fetches data from the gridradar.net API
and writes it to an InfluxDB instance.
"""
# import standard modules
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import configparser
import logging
import os
import signal
import time
from datetime import datetime
# import 3rd party modules
import requests
import influxdb
#import functions from files
from app_functions import *
from basic_functions import *
from influx import *
__version__ = "0.0.1"
__version_date__ = "2022-02-05"
__description__ = "gridradar2influx"
__license__ = "MIT"
# default vars
running = True
default_config = os.path.join(os.path.dirname(__file__), 'config.ini')
default_log_level = logging.INFO
def main():
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
# parse command line arguments
args = parse_args()
# set logging
log_level = logging.DEBUG if args.verbose is True else default_log_level
if args.daemon:
# omit time stamp if run in daemon mode
logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s')
else:
logging.basicConfig(level=log_level, format='%(asctime)s - %(levelname)s: %(message)s')
# read config from ini file
config = read_config(args.config_file)
# set up influxdb handler
influxdb_client = None
try:
influxdb_client = influxdb.InfluxDBClient(
config.get('influxdb', 'host'),
config.getint('influxdb', 'port', fallback=8086),
config.get('influxdb', 'username'),
config.get('influxdb', 'password'),
config.get('influxdb', 'database'),
config.getboolean('influxdb', 'ssl', fallback=False),
config.getboolean('influxdb', 'verify_ssl', fallback=False)
)
measurement_name=config.get('influxdb', 'measurement_name')
location=config.get('influxdb', 'location')
# test more config options and see if they are present
#_ = config.get('influxdb', 'measurement_name')
except configparser.Error as e:
logging.error("Config Error: %s", str(e))
exit(1)
except ValueError as e:
logging.error("Config Error: %s", str(e))
exit(1)
# check influx db status
check_db_status(influxdb_client, config.get('influxdb', 'database'))
# create authenticated gridradar-api client handler
api_response = None
result_dict={}
request_interval = 60
    try:
        request_interval = config.getint('gridradar', 'interval', fallback=60)
        url = config.get('gridradar', 'url')
        token = config.get('gridradar', 'token')
        # blank request to check if authentication works
        api_response = getdatafromapi(url, token, {})
    except configparser.Error as e:
        logging.error("Config Error: %s", str(e))
        exit(1)
    except requests.exceptions.RequestException as e:
        # give a more specific hint for the usual authentication/endpoint mistakes
        if "401" in str(e):
            logging.error("Failed to connect to gridradar-API '%s' using credentials. Check token!" %
                          config.get('gridradar', 'token'))
        elif "404" in str(e):
            logging.error("Failed to connect to gridradar-API '%s' using credentials. Check url!" %
                          config.get('gridradar', 'url'))
        else:
            logging.error(str(e))
        exit(1)
    except BaseException as e:
        logging.error("Failed to connect to gridradar-API '%s'" % str(e))
        exit(1)
logging.info("Successfully connected to gridradar-API")
# read services from config file
###services_to_query = get_services(config, "service")
logging.info("Starting main loop - wait until first API-Request '%s' seconds",request_interval)
while running:
logging.debug("Starting gridradar-API requests")
        time.sleep(request_interval)  # wait, otherwise the API answers 429 'Limitation: maximum number of requests per second exceeded'
request=str2dict(config.get('gridradar', 'request_freq'))
duration=grapi2influx(request,influxdb_client,config)
# just sleep for interval seconds - last run duration
for _ in range(0, int(((request_interval * 1000) - duration) / 100)):
if running is False:
break
time.sleep(0.0965)
request=str2dict(config.get('gridradar', 'request_net_time'))
duration=grapi2influx(request,influxdb_client,config)
# just sleep for interval seconds - last run duration
for _ in range(0, int(((request_interval * 1000) - duration) / 100)):
if running is False:
break
time.sleep(0.0965)
if __name__ == "__main__":
main()
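# Example config.ini layout (all values are placeholders; only keys that are
# actually read above are listed -- ssl/verify_ssl/interval fall back to
# defaults when omitted, and the request_* strings are parsed by str2dict()
# from app_functions, whose exact syntax is not shown here):
#
#   [influxdb]
#   host = localhost
#   port = 8086
#   username = gridradar
#   password = secret
#   database = gridradar
#   measurement_name = grid_frequency
#   location = home
#
#   [gridradar]
#   url = <gridradar API endpoint>
#   token = <api token>
#   interval = 60
#   request_freq = <request definition for the frequency query>
#   request_net_time = <request definition for the network-time query>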
| 34.323944 | 131 | 0.650185 | [
"MIT"
] | Wuifi/gridradar2influx | gridradar2influx.py | 4,874 | Python |
class Developer:
def __init__(self,name):
self.name = name
    def coding(self):
        print(self.name + ' is a developer!')
class PythonDeveloper(Developer):
    def coding(self):
        print(self.name + ' is a Python developer!')
class JavaDeveloper(Developer):
    def coding(self):
        print(self.name + ' is a Java developer!')
class CPPDeveloper(Developer):
    def coding(self):
        print(self.name + ' is a C++ developer!')
dev1 = PythonDeveloper('Chris')
dev2 = JavaDeveloper('Jason')
dev3 = CPPDeveloper('Bryan')
dev1.coding()
dev2.coding()
dev3.coding()
| 21.444444 | 50 | 0.651123 | [
"MIT"
] | min9288/Multicampus | Python/python_programming_stu/chapter09_class/9-5.developer_polymorphism.py | 579 | Python |
import logging
import odoo.http
from odooku.request import WebRequestMixin
_logger = logging.getLogger(__name__)
class WebSocketRequest(WebRequestMixin, odoo.http.WebRequest):
def __init__(self, httprequest):
super(WebSocketRequest, self).__init__(httprequest)
def dispatch(self):
raise NotImplementedError()
class WebSocketRpcRequest(WebSocketRequest):
_request_type = 'json'
def __init__(self, httprequest, data):
super(WebSocketRpcRequest, self).__init__(httprequest)
self.params = data.get('params', {})
self.id = data.get('id')
self.context = self.params.pop('context', dict(self.session.context))
def dispatch(self):
try:
result = self._call_function(**self.params)
except Exception as exception:
return self._handle_exception(exception)
return self._json_response(result)
def _json_response(self, result=None, error=None):
response = {
'jsonrpc': '2.0',
'id': self.id
}
if error is not None:
response['error'] = error
if result is not None:
response['result'] = result
return response
def _handle_exception(self, exception):
"""Called within an except block to allow converting exceptions
to arbitrary responses. Anything returned (except None) will
be used as response."""
try:
return super(WebSocketRpcRequest, self)._handle_exception(exception)
except Exception:
if not isinstance(exception, (odoo.exceptions.Warning, odoo.http.SessionExpiredException, odoo.exceptions.except_orm)):
_logger.exception("Exception during JSON request handling.")
error = {
'code': 200,
'message': "Odoo Server Error",
'data': odoo.http.serialize_exception(exception)
}
if isinstance(exception, odoo.http.AuthenticationError):
error['code'] = 100
error['message'] = "Odoo Session Invalid"
if isinstance(exception, odoo.http.SessionExpiredException):
error['code'] = 100
error['message'] = "Odoo Session Expired"
return self._json_response(error=error)
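# Wire format handled by WebSocketRpcRequest (shapes taken from the code above;
# the contents of "params" depend entirely on the endpoint being dispatched):
#
#   incoming frame:  {"id": 3, "params": {"context": {...}, ...}}
#   success reply:   {"jsonrpc": "2.0", "id": 3, "result": ...}
#   error reply:     {"jsonrpc": "2.0", "id": 3,
#                     "error": {"code": 200, "message": "Odoo Server Error",
#                               "data": {...serialized exception...}}}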
| 33.898551 | 131 | 0.619496 | [
"Apache-2.0"
] | 12thmar/marodooku | odooku/services/websocket/requests.py | 2,339 | Python |
#!/usr/bin/env python3
from itertools import product
if __name__ == "__main__":
arr1 = list(map(int, input().strip().split(' ')))
arr2 = list(map(int, input().strip().split(' ')))
for el in product(arr1, arr2):
print("{} ".format(el), end='')
| 24.909091 | 53 | 0.569343 | [
"MIT"
] | 1BM18CS069/HackerRank | python/itertools-product.py | 274 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
# URL pattern for the UserListView
url(
regex=r'^$',
view=views.UserListView.as_view(),
name='list'
),
# URL pattern for the UserRedirectView
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
# URL pattern for the UserDetailView
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
# URL pattern for the UserUpdateView
url(
regex=r'^~update/$',
view=views.UserUpdateView.as_view(),
name='update'
),
]
| 20.552632 | 56 | 0.581306 | [
"MIT"
] | jondelmil/artinvestor-server | artinvestor_server/users/urls.py | 781 | Python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/get_inventory_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_inventory_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nCpogoprotos/networking/requests/messages/get_inventory_message.proto\x12\'pogoprotos.networking.requests.messages\"0\n\x13GetInventoryMessage\x12\x19\n\x11last_timestamp_ms\x18\x01 \x01(\x03\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETINVENTORYMESSAGE = _descriptor.Descriptor(
name='GetInventoryMessage',
full_name='pogoprotos.networking.requests.messages.GetInventoryMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_timestamp_ms', full_name='pogoprotos.networking.requests.messages.GetInventoryMessage.last_timestamp_ms', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=160,
)
DESCRIPTOR.message_types_by_name['GetInventoryMessage'] = _GETINVENTORYMESSAGE
GetInventoryMessage = _reflection.GeneratedProtocolMessageType('GetInventoryMessage', (_message.Message,), dict(
DESCRIPTOR = _GETINVENTORYMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_inventory_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetInventoryMessage)
))
_sym_db.RegisterMessage(GetInventoryMessage)
# @@protoc_insertion_point(module_scope)
| 33.785714 | 228 | 0.796195 | [
"MIT"
] | 123FLO321/pgoapi | pgoapi/protos/pogoprotos/networking/requests/messages/get_inventory_message_pb2.py | 2,365 | Python |
import app.constants as const
class Config:
def __init__(self, shape=const.DEFAULT_CONFIG_SHAPE,
size=const.DEFAULT_CONFIG_SIZE,
max_thick=const.MAXIMUM_CONFIG_THICKNESS,
min_thick=const.MINIMUM_CONFIG_THICKNESS,
use_border=const.DEFAULT_CONFIG_BORDER,
border_thick=const.DEFAULT_CONFIG_BORDER_THICKNESS,
curve=const.DEFAULT_CONFIG_CURVE,
stl_format=const.DEFAULT_CONFIG_FORMAT):
self.shape = shape
self.size = size
self.max_thickness = max_thick
self.min_thickness = min_thick
self.use_border = use_border
self.border_thickness = border_thick
self.curve = curve
self.format = stl_format
def get_config(self):
return self
| 34.333333 | 68 | 0.643204 | [
"MIT"
] | calemolech/lithophanes | app/config.py | 824 | Python |
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2020 Hnaynag University (Jae-Hong Lee)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import json
import logging
import re
import random
from pathlib import Path
from tqdm import tqdm
from nltk import tokenize
from espnet.utils.cli_utils import get_commandline_args
def error_checker(keys, file_path, log_path):
buffer_key = None
past_key = None
total_key_count = len(keys)
skip_key_count = 0
with open(file_path, encoding="utf-8") as f:
for line in tqdm(f.readlines()):
sps = line.rstrip().split(maxsplit=1)
if len(sps) == 2:
key, value = sps
if key in keys:
past_key = key
else:
if buffer_key != past_key:
keys.remove(past_key)
skip_key_count += 1
buffer_key = past_key
else:
pass
logging.info(f"Skip ratio is {skip_key_count / total_key_count}")
return keys
def get_parser():
parser = argparse.ArgumentParser(
description="TTT json to text",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("json", type=str, help="json files")
parser.add_argument("dest", type=str, help="output file path")
parser.add_argument("prep", type=int, help="flag of preprocessing", default=False)
parser.add_argument("total_offset", type=int, help="", default=100)
parser.add_argument("max_snt_len", type=int, help="", default=150)
parser.add_argument("max_para_len", type=int, help="", default=1600)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
# logging info
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
logging.info("reading %s", args.json)
with codecs.open(args.json, "r", encoding="utf-8") as f:
j = json.load(f)
dest = Path(args.dest)
# Remove the duplicated keys and load the json to the dict
prep_j = {}
for line in tqdm(j):
try:
prep_j[line['id']] = {'paragraph': line['paragraph'], 'sentence': line['sentence']}
except:
logging.warning("The key %s is duplicated with the exsisted key", line['id'])
    # Eliminate the keys that break python's readlines-based parsing
    # FIXME(j-ppng): this is handled by the readlines error cleaner for now,
    # but a more specific text cleaner is still needed.
if args.prep:
keys = [k for k in prep_j.keys()]
logging.info("writing train_origin to %s", str(dest))
train_txt = codecs.open(dest / "text_orig", "w", encoding="utf-8")
for key in tqdm(keys):
train_txt.write(key + " " + prep_j[key]['paragraph'] + "\n")
keys = error_checker(keys,
dest / "text_orig",
dest / "error.log")
logging.info("writing key_file to %s", str(dest))
key_file = codecs.open(dest / "keys", "w", encoding="utf-8")
for key in keys:
key_file.write(key + "\n")
else:
keys = []
with open(dest / "keys", encoding="utf-8") as f:
for key in f.readlines():
keys.append(key.replace("\n", ""))
new_keys = []
total_offset = args.total_offset
max_snt_len = args.max_snt_len
max_para_len = args.max_para_len
for key in tqdm(keys):
        # find-and-clip preprocessing
        # On the first try, we applied these procedures in the middle of the collect_stats process.
        # However, the {feat}_shape file stores a static feature size, so a
        # feature-shape mismatch would only surface during training.
idx = prep_j[key]['paragraph'].find(prep_j[key]['sentence'])
offset = random.randint(0, total_offset)
sent_len = len(prep_j[key]['sentence'])
# calculate the offset for the clip with the centroid which sentence in the paragraph.
prior_offset = max(idx - offset, 0)
post_offset = idx + sent_len + (total_offset - offset)
# clip the new paragraph area in the paragraph with the offsets.
selected_para = prep_j[key]['paragraph'][prior_offset:post_offset]
para_len = len(selected_para)
if para_len < sent_len:
raise RuntimeError(f"prior_offeset: {prior_offset}, post_offset: {post_offset}, length: {para_len}")
prep_j[key]['paragraph'] = selected_para
# remove key of the long sentence/paragraph
if sent_len < max_snt_len and para_len < max_para_len:
new_keys.append(key)
logging.info(f"Removed key raio is {1-len(new_keys)/len(keys)}")
keys = new_keys
# Save the results
logging.info("writing train.txt to %s", str(dest))
train_txt = codecs.open(dest / "text", "w", encoding="utf-8")
for key in tqdm(keys):
train_txt.write(prep_j[key]['paragraph'] + "\n")
logging.info("writing train and valid text to %s", str(dest))
split_point = int(len(keys) * 0.9)
datasets = {'train': keys[:split_point], 'valid': keys[split_point:]}
for dataset in datasets.keys():
logging.info("writing ref trn to %s", str(dest / Path(dataset)))
input_text = codecs.open(dest / Path(dataset) / "text_input", "w", encoding="utf-8")
output_text = codecs.open(dest / Path(dataset) / "text_output", "w", encoding="utf-8")
for key in tqdm(datasets[dataset]):
input_text.write(key + " " + prep_j[key]['paragraph'] + "\n")
output_text.write(key + " " + prep_j[key]['sentence'] + "\n")
    # If you want to check the data for errors, just use these lines.
# error_checker(keys,
# dest / Path(dataset) / "text_input",
# dest / Path(dataset) / "error.log") | 38.670886 | 112 | 0.609165 | [
"Apache-2.0"
] | j-pong/HYnet2-summachine | egs/linersum/asr1/local/data_prep.py | 6,110 | Python |
'''define the config file for voc and resnet101os16'''
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
{
'type': 'voc',
'set': 'trainaug',
'rootdir': 'data/VOCdevkit/VOC2012',
}
)
DATASET_CFG['test'].update(
{
'type': 'voc',
'rootdir': 'data/VOCdevkit/VOC2012',
}
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 60,
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 21,
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'use_stem': True,
'selected_indices': (2, 3),
},
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'annnet_resnet101os16_voc_train',
'logfilepath': 'annnet_resnet101os16_voc_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'annnet_resnet101os16_voc_test',
'logfilepath': 'annnet_resnet101os16_voc_test/test.log',
'resultsavepath': 'annnet_resnet101os16_voc_test/annnet_resnet101os16_voc_results.pkl'
}
) | 24.274194 | 94 | 0.627243 | [
"MIT"
] | skydengyao/sssegmentation | ssseg/cfgs/annnet/cfgs_voc_resnet101os16.py | 1,505 | Python |
# Write a Python function to sum all the numbers in a list
# Sample List : [8, 2, 3, 0, 7]
# Expected Output : 20
def sum_list(numbers):
    total = 0
    for i in numbers:
        total += i
    return total
numbers = [8, 2, 3, 0, 7]
print(sum_list(numbers))
"Apache-2.0"
] | kevorkkeheian/learn-python | introduction/exercise/ex9.py | 245 | Python |
from django.apps import AppConfig
class SignalsConfig(AppConfig):
name = 'signals.apps.signals'
verbose_name = 'Signals'
def ready(self):
# Import Django signals to connect receiver functions.
import signals.apps.signals.signal_receivers # noqa
| 25.181818 | 62 | 0.714801 | [
"MPL-2.0"
] | CBuiVNG/signals | api/app/signals/apps/signals/config.py | 277 | Python |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""tf2onnx.onnx_opset module"""
from . import common, controlflow, generator, logical, math, misc, nn, quantize, reduction, rnn, tensor, traditionalml
| 41 | 118 | 0.764228 | [
"MIT"
] | NikolasMarkou/tensorflow-onnx | tf2onnx/onnx_opset/__init__.py | 246 | Python |
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from .warp_mls import WarpMLS
def distort(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
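# Example (sketch): each warp above accepts an H x W (x C) uint8 image and
# returns a warped copy of the same size; `segment` controls how many vertical
# slices are jittered independently in distort()/stretch().
#
#     img = (np.random.rand(32, 100, 3) * 255).astype(np.uint8)
#     jittered = distort(img, segment=4)      # local corner jitter per slice
#     stretched = stretch(img, segment=4)     # horizontal shift of slice cuts
#     tilted = perspective(img)               # global perspective-style warp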
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
| 32.75641 | 106 | 0.604501 | [
"Apache-2.0"
] | WenmuZhou/crnn.pytorch | data_loader/modules/Text_Image_Augmentation_python/augment.py | 5,110 | Python |
class Student:
def __init__(self):
self.surname = None
self.name = None
self.patronymic = None
self.age = 19
self.birthday = None
self.group = None
class Teacher:
def __init__(self):
self.surname = None
self.name = None
self.patronymic = None
self.age = None
self.education = None
self.experience = None
self.discipline = None
class StudyGroup:
def __init__(self):
self.number = None
self.progress = None
self.specialty = None
self.mark = None
class College:
def __init__(self):
self.abbreviation = None
self.discipline = None
self.license = None
class Exam:
def __init__(self):
self.subject = None
self.mark = None
self.teacher = None
class StudentOnExam:
def __init__(self):
self.student = None
self.mark = None
self.teacher = None
class Car:
def __init__(self):
self.engine = None
self.color = 'white'
self.brand = None
self.mileage = None
user_1 = Student()
user_1.surname = 'Рычкова'
print(user_1, 'surname:', user_1.surname, 'age:', user_1.age, 'birthday:', user_1.birthday)
user_1.age = 20
user_1.birthday = '20.20.2000'
print(user_1, 'surname:', user_1.surname, 'age:', user_1.age, 'birthday:', user_1.birthday)
user_2 = Car()
user_2.brand = 'Toyota'
user_2.mileage = '42141'
print(user_2, user_2.brand, user_2.mileage, user_2.color)
user_2.engine = 2.0
print(user_2, user_2.brand, user_2.mileage, user_2.engine)
| 21.986301 | 91 | 0.611838 | [
"MIT"
] | Floou/python-basics | 201005/home_task.py | 1,612 | Python |
"""
Examples of loading all information about an object or set of objects from the
database.
"""
from __future__ import absolute_import
from __future__ import print_function
from owmeta_core.context import Context
from owmeta_core.command import OWM
from owmeta.connection import Connection
from owmeta.neuron import Neuron
def pp_connection(conn):
print(conn.pre_cell(), conn.post_cell(), conn.syntype(), conn.synclass(), conn.number())
with OWM('../.owm').connect() as owmconn:
ctx = Context(ident="http://openworm.org/data", conf=owmconn.conf).stored
query_object = ctx(Connection)(pre_cell=ctx(Neuron).query(name='AVAL'))
print('STARTING WITH AVAL')
for x in query_object.load():
pp_connection(x)
print()
print('STARTING WITH PVCL')
query_object = ctx(Connection)(pre_cell=ctx(Neuron).query(name='PVCL'))
for x in query_object.load():
pp_connection(x)
print()
print('NEURONS')
query_object = ctx(Neuron).query()
# sometimes a neuron object with the same name is returned more than once
names = dict()
for x in query_object.load():
n = x.name()
if n not in names:
names[n] = dict()
print(n)
print()
print('NEIGHBORS of PVCL')
query_object = ctx(Neuron).query(name='PVCL')
for x in query_object.neighbor():
print(x.name())
print()
print('NEIGHBORS of AVAL with number=3 connections')
query_object = ctx(Neuron).query(name='AVAL')
for x in query_object.neighbor.get(number=3):
print(x.name())
    print()
print('NEURONS and their RECEPTORS')
for x in ctx(Neuron).query().load():
# Wrap in a try-block in case there are no receptors listed
print(x, end=' ')
try:
for r in x.receptor():
print(' ', r, end=' ')
except StopIteration:
pass
print()
| 30.66129 | 92 | 0.643346 | [
"MIT"
] | cheelee/owmeta | examples/test_bgp.py | 1,901 | Python |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
import time
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self, split=False):
extra_args = [["-debug", "-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
bitcoind_processes[1].wait()
self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.1 ltc (10,000,000 satoshis each)
print("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.1)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("2.5"))
print("Running tests")
dest_address = peer_node.getnewaddress()
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
print("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propogates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    # Create a transaction with a segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.1"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
version=0,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.09"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.05"),
get_change_address(rbf_node): Decimal("0.03")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = create_fund_sign_send(peer_node, {dest_address: 0.090000})
assert_raises_message(JSONRPCException, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.1")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_message(JSONRPCException, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.050000})
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_message(JSONRPCException, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node,
Decimal("0.100000"),
{dest_address: 0.080000,
get_change_address(rbf_node): Decimal("0.010000")})
rbf_node.bumpfee(rbfid, {"totalFee": 2000000})
rbfid = spend_one_input(rbf_node,
Decimal("0.100000"),
{dest_address: 0.080000,
get_change_address(rbf_node): Decimal("0.010000")})
assert_raises_message(JSONRPCException, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 2000001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
    # the bump asks for a total fee of 1,990,000 satoshis, but the now-dust change output is added to the fee, giving 2,000,000 (0.02)
rbfid = spend_one_input(rbf_node,
Decimal("0.100000"),
{dest_address: 0.080000,
get_change_address(rbf_node): Decimal("0.010000")})
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 1990000})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.020000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
# increase feerate by 2.5x, test that fee increased at least 2x
rbf_node.settxfee(Decimal("0.001000"))
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
rbftx = rbf_node.gettransaction(rbfid)
rbf_node.settxfee(Decimal("0.002500"))
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["fee"] > 2 * abs(rbftx["fee"])
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbf_node.settxfee(Decimal("0.001000"))
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 100000})
assert_raises_message(JSONRPCException, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 200000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 200000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 100000, "replaceable": False})
assert_raises_message(JSONRPCException, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 200000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = create_fund_sign_send(rbf_node, {rbf_node_address: 0.090000})
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, 0.090000, "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.090000})
rbf_node.walletlock()
assert_raises_message(JSONRPCException, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def create_fund_sign_send(node, outputs):
rawtx = node.createrawtransaction([], outputs)
fundtx = node.fundrawtransaction(rawtx)
signedtx = node.signrawtransaction(fundtx["hex"])
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def spend_one_input(node, input_amount, outputs):
input = dict(sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == input_amount))
rawtx = node.createrawtransaction([input], outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def get_change_address(node):
"""Get a wallet change address.
There is no wallet RPC to access unused change addresses, so this creates a
    dummy transaction, calls fundrawtransaction to add an input and change
output, then returns the change address."""
dest_address = node.getnewaddress()
dest_amount = Decimal("0.012345")
rawtx = node.createrawtransaction([], {dest_address: dest_amount})
fundtx = node.fundrawtransaction(rawtx)
info = node.decoderawtransaction(fundtx["hex"])
return next(address for out in info["vout"]
if out["value"] != dest_amount for address in out["scriptPubKey"]["addresses"])
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
error = node.submitblock(bytes_to_hex_str(block.serialize(True)))
if error is not None:
raise Exception(error)
return block
if __name__ == "__main__":
BumpFeeTest().main()
| 44.629969 | 121 | 0.71036 | [
"MIT"
] | citrixrep/rec | qa/rpc-tests/bumpfee.py | 14,594 | Python |
#!/usr/bin/env python3
def main():
pattern = input().upper()
genome = input().upper()
mismatches = int(input())
occurrences = approximate_occurrences(genome, pattern, mismatches)
for o in occurrences:
print(o, end=' ')
print()
LIST_A = ['C', 'T', 'G']
LIST_C = ['A', 'T', 'G']
LIST_T = ['C', 'A', 'G']
LIST_G = ['C', 'T', 'A']
def _generate_immediate_neighbours(pattern: str) -> list:
"""
Generate immediate (different by one mismatch) neighbours of the given genome pattern
:param pattern: a pattern to examine
:return: neighbourhood, NOT including the given pattern
"""
generated = []
for i in range(len(pattern)):
if pattern[i] == 'A':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])
elif pattern[i] == 'C':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])
elif pattern[i] == 'T':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])
elif pattern[i] == 'G':
generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])
return generated
def generate_neighbours(pattern: str, mismatches: int) -> set:
"""
Generate neighbours for the given pattern (genome string)
:param pattern: genome pattern
:param mismatches: number of mismatches to generate neighbours
:return: a set of patterns in the neighbourhood, including the 'pattern' itself
"""
neighbourhood = set()
neighbourhood.add(pattern)
curr_patterns = [pattern]
next_patterns = []
for curr_mismatches in range(mismatches):
for curr_pattern in curr_patterns:
for neighbour in _generate_immediate_neighbours(curr_pattern):
if neighbour not in neighbourhood:
neighbourhood.add(neighbour)
next_patterns.append(neighbour)
curr_patterns = next_patterns
next_patterns = []
return neighbourhood
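# For example, generate_neighbours("AT", 1) yields the 7 patterns
# {"AT", "CT", "TT", "GT", "AA", "AC", "AG"}: the pattern itself plus every
# string obtained by substituting a single position.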
def approximate_occurrences(genome: str, pattern: str, mismatches: int) -> list:
neighbours = generate_neighbours(pattern, mismatches)
occurrences = set()
for neighbour in neighbours:
search_start = 0
while search_start <= len(genome) - len(pattern):
index_found = genome.find(neighbour, search_start)
if index_found == -1:
break
occurrences.add(index_found)
search_start = index_found + 1
return sorted(list(occurrences))
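# Worked example (illustrative): for genome "AAATTT", pattern "AAT" and at most
# one mismatch, approximate_occurrences("AAATTT", "AAT", 1) returns [0, 1, 2],
# since the windows "AAA", "AAT" and "ATT" each differ from "AAT" in at most
# one position.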
if __name__ == '__main__':
main()
| 30.258824 | 89 | 0.618585 | [
"MIT"
] | leskin-in/mipt-bioalgo | hw1/approximate_occurrences.py | 2,572 | Python |
# -*- coding: utf-8 -*-
'''
Runs MultiprocessTest with all warnings including traceback...
'''
#
# https://stackoverflow.com/questions/22373927/get-traceback-of-warnings
import traceback
import warnings
import sys
from . import multiprocess
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
def main(test_group=None):
warnings.showwarning = warn_with_traceback
warnings.simplefilter("always")
multiprocess.main(test_group)
if __name__ == '__main__':
main()
| 24.571429 | 83 | 0.741279 | [
"BSD-3-Clause"
] | dhmit/dh_testers | dh_testers/warningMultiprocess.py | 688 | Python |
version = '0.1.1'
title = 'Cloud::Auth'
api_version = 'v1'
api_prefix = '/api/' + api_version
# $ echo -n 'Once upon a time...' | openssl.exe dgst -sha256
# (stdin)= 7cc6caf901b894033626981cd102021727aa59c2548d79e59382649b2c6f50f2
ADMIN_TOKEN = 'd7981fb00d6f071e1a8b454c47b378d815b53541621e22dc4b3dbf5a6b9c8b1d'
USER_TOKEN = '4d07df1ebd8e23eb48dbcfdde93452d1392c9b890ef3a3b82dc05ff9f5ff8d19'
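# The digest in the comment above can be reproduced in Python (hashlib.sha256 is
# equivalent to `openssl dgst -sha256` over the same bytes):
#
#     >>> import hashlib
#     >>> hashlib.sha256(b'Once upon a time...').hexdigest()
#     '7cc6caf901b894033626981cd102021727aa59c2548d79e59382649b2c6f50f2'
#
# Note: this is only the example from the comment; the passphrases behind
# ADMIN_TOKEN and USER_TOKEN are not recorded here.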
| 32.916667 | 80 | 0.802532 | [
"MIT"
] | sergio-rudenko/cloudauth | src/app/conf.py | 395 | Python |
from __future__ import division
from __future__ import print_function
from __future__ import with_statement
from replacers import *
import pandas as pd
import nltk
import subprocess
def findFreqWord(fuzzyDF):
f1 = fuzzyDF # pd.read_csv("SubmittedCSV/fuzzy.csv")
f2 = pd.DataFrame(columns=['Tweets', 'Classified', 'FreqWord'])
f3 = pd.read_csv("SubmittedCSV/fuzzyptag.csv", )
pop_list = list(f3.iloc[:, 0])
for zero_cl_row in range(f1.__len__()):
row = 1
found = False
splitted_sentence = f1.iloc[zero_cl_row, 0].split()
print(splitted_sentence)
for tag in pop_list:
print("Popular tags:", pop_list)
for word in splitted_sentence:
if word in tag and f1.iloc[zero_cl_row, 1] == "Highly Positive":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Highly Negative":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Moderately Positive":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Moderately Negative":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Positive":
f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Negative":
f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
else:
print("Unmatched")
if not found:
print("NO")
f2.to_csv("SubmittedCSV/fuzzyfreq.csv", index=False)
try:
subprocess.call(['libreoffice','--calc','SubmittedCSV/fuzzyfreq.csv'])
except OSError:
print("Works with DEBIAN OS & LIBREOFFICE 5 only \n Use MS Excel or equivalent Software to open : "
"SubmittedCSV/fuzzyfreq.csv")
return f2
def pivotTable():
pass
# ---------------------------------- SUBMITTED LOGIC - TEST CASE
# ---------------------------------- #01 UNIT TESTING FAILED ##10, 11, 27, 30
# ---------------------------------- #02 LOGICAL GLITCH
# ---------------------------------- #03 COMPLIANCE MISUSE
# ---------------------------------- #04 MEMDUMP DETECTED
# ---------------------------------- #05 UNUSED OBJECTS, MEMORY BLOCK 0x0008
# for hosts_row in f1:
# row = 1
# found = False
# # t1=nltk.word_tokenize(hosts_row[0])
# t1 = hosts_row.split()
# print("t1=", t1)
# for master_row in pop_list:
# print("popular tags=", pop_list)
# for word in t1:
#
# if word == master_row[0] and hosts_row[1] == "Highly Positive":
# # >>> master_row[0] # Logical glitch, value uncompilable
# # 'b'
# f2.write(str(hosts_row[1]) + "," + word) # Will always look for 1st element of string
# # >>> hosts_row
# # ' neville rooney end ever tons trophy drought httpcocryingeyesjebfkdp,Positive\r\n'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
#
# elif word == master_row[0] and hosts_row[1] == "Highly Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# # >>> master_row[0]
# # 'business'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
#
# # print count
# if not found:
# print("no")
#
# print(count)
# f1.close()
# f2.close()
| 42.404412 | 114 | 0.469221 | [
"Apache-2.0"
] | 1MT3J45/ML-DroughtAnalysisNLP | freqWordSelection.py | 5,767 | Python |
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from distsup import utils
from distsup.configuration import config_utils
def get_val(dictionary, key, dict_name):
if key not in dictionary:
raise KeyError('%s has no %s key specified' % (dict_name, key))
return dictionary[key]
class ConfigInstantiator(object):
def __init__(self, objects_config, default_class_dict={},
default_modules_dict={}, name='', **kwargs):
super(ConfigInstantiator, self).__init__(**kwargs)
self.objects_config = objects_config
self.default_class_dict = default_class_dict
self.default_modules_dict = default_modules_dict
self.cache = {}
self.name = name
def keys(self):
return self.objects_config.keys()
def _getitem(self, key, additional_parameters=None):
if key not in self.cache:
# make a copy since we may change the dict in the end
opts = dict(get_val(self.objects_config, key, self.name))
if 'class_name' not in opts:
opts['class_name'] = self.default_class_dict[key]
self.cache[key] = utils.construct_from_kwargs(
opts, self.default_modules_dict.get(key),
additional_parameters)
return self.cache[key]
def __getitem__(self, key):
return self._getitem(key)
class DatasetConfigInstantiator(ConfigInstantiator):
def _getitem(self, key, additional_parameters=None):
if key not in self.cache:
# make a copy since we may change the dict in the end
opts = dict(get_val(self.objects_config, key, self.name))
if 'class_name' not in opts:
opts['class_name'] = self.default_class_dict[key]
self.cache[key] = utils.construct_from_kwargs(
opts, self.default_modules_dict.get(key),
additional_parameters)
return self.cache[key]
class _ConstantDict(object):
def __init__(self, v, **kwargs):
super(_ConstantDict, self).__init__(**kwargs)
self.v = v
def __getitem__(self, k):
return self.v
def get(self, k, v=None):
return self.v
class Configuration(ConfigInstantiator):
"""
    Class responsible for instantiating objects that are defined in the config file.
    The class tries to be smart about the following modules:
    - Trainer will by default instantiate a 'distsup.trainer.Trainer'
    - all items under the Datasets key will instantiate a 'distsup.data.Data'
- It will configure the Model key according to Dataset specification
Args:
config_path (str): Path pointing to the config file.
modify_dict (dict): Optional dictionary representing config
modifications.
store_path (str): Optional path to store linked config.
"""
default_class_dict = {
'Trainer': 'Trainer',
}
default_modules_dict = {
'Trainer': 'distsup.trainer',
'Datasets': 'distsup.data',
'Model': 'models',
}
def __init__(self, config_path, modify_dict={}, store_path=None, **kwargs):
config = config_utils.ConfigParser(config_path).get_config(modify_dict)
if store_path is not None:
config_utils.ConfigLinker(config).save_linked_config(store_path)
super(Configuration, self).__init__(
objects_config=config,
default_class_dict=Configuration.default_class_dict,
default_modules_dict=Configuration.default_modules_dict,
name=config_path,
**kwargs)
if 'Datasets' in self.objects_config:
self.cache['Datasets'] = DatasetConfigInstantiator(
self.objects_config['Datasets'],
default_modules_dict=_ConstantDict(
Configuration.default_modules_dict['Datasets']),
name='Config.Datasets')
def __getitem__(self, key):
if key == 'Model':
model_param = {'dataloader': self['Datasets']['train']}
return self._getitem('Model', additional_parameters=model_param)
else:
return super(Configuration, self).__getitem__(key)
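# Illustrative usage sketch (an assumption about a typical experiment script,
# not code taken from this repository):
#
#     config = Configuration('path/to/experiment.yaml')
#     train_data = config['Datasets']['train']
#     model = config['Model']        # built with the train dataloader injected
#     trainer = config['Trainer']    # defaults to distsup.trainer.Trainer
#
# Each key is instantiated lazily on first access and cached, so repeated
# lookups return the same object.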
class Globals(object):
"""Global configuration objects."""
cuda = torch.cuda.is_available()
cluster = ''
exp_tag = None
save_dir = None
exp_uuid = None
exp_config_fpath = None
# Track training progress. The trainer/loader will fill in proper values.
epoch = -1
current_iteration = -1
| 35.439189 | 79 | 0.659295 | [
"Apache-2.0"
] | distsup/DistSup | distsup/configuration/__init__.py | 5,245 | Python |
'''
use case handlers package
'''
from .find_average_temperature_handler import FindAverageTemperatureHandler
__all__ = [
'FindAverageTemperatureHandler'
]
| 17.888889 | 75 | 0.801242 | [
"MIT"
] | joaquinquintas/shipwell_backend_ricardo | core/handlers/__init__.py | 161 | Python |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import re
import sys
from importlib import import_module
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
CUSTOM_MODULES = set(['arch', 'eggroll', 'federatedml', 'fate_flow'])
USE_SOURCE_MODULES = set(['antlr4', 'mocks', 'TestTokenStreamRewriter'])
class DummyConfig(object):
def __init__(self, intersphinx_mapping=None, intersphinx_cache_limit=5, intersphinx_timeout=None):
self.intersphinx_mapping = intersphinx_mapping or {}
self.intersphinx_cache_limit = intersphinx_cache_limit
self.intersphinx_timeout = intersphinx_timeout
self.tls_verify = True
class DummyApp(object):
def __init__(self):
self.config = DummyConfig()
def get_python_standard_modules(version=None):
version = '{}.{}'.format(sys.version_info[0], sys.version_info[1]) if not version else version
module_cache_file = 'python{}_modules.csv'.format(version.replace('.', '_'))
if os.path.exists(module_cache_file):
print('read python {} standard modules'.format(version))
modules = list()
with open(module_cache_file, 'r') as fr:
while True:
line = fr.readline()
if not line:
break
modules.append(line.strip())
else:
from sphinx.ext.intersphinx import fetch_inventory
print('fetch python {} standard modules'.format(version))
url = "http://docs.python.org/{}/objects.inv".format(version)
modules = sorted(
list(
fetch_inventory(DummyApp(), "", url).get("py:module").keys()
)
)
with open(module_cache_file, 'w') as fw:
fw.write('\n'.join(modules))
return modules
def search_require_modules(project_dir):
grep_cmd = "find {} -name '*.py' | grep -v -E '*_pb2\.py' | grep -v -E '*_pb2_grpc\.py' | grep -v -E 'workflow\.py' | xargs -n1 cat | grep -E '^import|^from'".format(project_dir)
print(grep_cmd)
p = subprocess.Popen(grep_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
import_lines = stdout.decode('utf-8').strip().split('\n')
python_standard_modules = get_python_standard_modules('3.6')
require_modules = set()
require_lines = dict()
all_imports = set()
for line in import_lines:
import_module = re.sub('^import |^from ', '', line).split(' ')[0].strip()
require_module = import_module.split('.')[0]
if len(require_module) == 0:
continue
if ',' in require_module:
tmp = require_module.split(',')
else:
tmp = [require_module]
for r_m in tmp:
if r_m.startswith('.'):
continue
if r_m.endswith('_pb2'):
continue
if r_m in USE_SOURCE_MODULES:
continue
all_imports.add(line.strip())
if r_m in python_standard_modules:
continue
if r_m in CUSTOM_MODULES:
continue
require_modules.add(r_m)
require_lines[r_m] = line.strip()
return require_modules, require_lines, all_imports
def conda_env_install(module):
print('try install: {}'.format(module))
install_cmd = 'conda install -y {}'.format(module)
p = subprocess.Popen(install_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
if p.returncode != 0:
print('try install again: {}'.format(module))
install_cmd = 'conda install -c conda-forge -y {}'.format(module)
p = subprocess.Popen(install_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
return p.returncode
def pip_env_install(module):
print('try install: {}'.format(module))
install_cmd = 'pip install {}'.format(module)
p = subprocess.Popen(install_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
return p.returncode
def try_import(module):
try:
import_module(module)
return 0
except Exception as e:
st = pip_env_install(module)
if st == 0:
return 1
else:
return 2
def check_require(require_modules, require_lines):
for require_module in require_modules:
st = try_import(require_module)
if st == 0:
continue
elif st == 1:
print('installed {}: {}\n'.format(require_module, require_lines[require_module]))
elif st == 2:
print('failed installed {}: {}\n'.format(require_module, require_lines[require_module]))
def check_import(all_imports):
dependent_modules = set()
dependent_lines = dict()
for import_code in all_imports:
python_cmd = "python -c '{}'".format(import_code)
p = subprocess.Popen(python_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
if p.returncode != 0:
# import error
stdout = stdout.decode('utf-8').strip().split('\n')
for line in stdout:
if line.startswith('ModuleNotFoundError:'):
require_module = line.strip().split(' ')[-1].strip("'").split('.')[0]
print('{}: {}'.format(require_module, import_code))
if require_module in CUSTOM_MODULES:
pass
# code error
else:
dependent_modules.add(require_module)
dependent_lines[require_module] = import_code
return dependent_modules, dependent_lines
if __name__ == '__main__':
print('project dir is: {}'.format(PROJECT_DIR))
print('start search import')
require_modules, require_lines, all_imports = search_require_modules(PROJECT_DIR)
print()
print('has {} require modules'.format(len(require_modules)))
print(require_modules)
print()
check_require(require_modules=require_modules, require_lines=require_lines)
print()
dependent_modules, dependent_lines = check_import(all_imports=all_imports)
print()
require_modules.update(dependent_modules)
require_lines.update(dependent_lines)
check_require(require_modules=require_modules, require_lines=require_lines)
print()
| 37.248756 | 182 | 0.605583 | [
"Apache-2.0"
] | Alice-6161/FATE | python/fate_flow/tests/check_fate_python_requirement.py | 7,487 | Python |
import itertools
import logging
import warnings
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import torch.nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair import file_utils
from flair.data import DataPoint, Sentence, Dictionary
from flair.datasets import DataLoader, SentenceDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module):
"""Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
Every new type of model must implement these methods."""
@property
@abstractmethod
def label_type(self):
"""Each model predicts labels of a certain type. TODO: can we find a better name for this?"""
raise NotImplementedError
@abstractmethod
def forward_loss(self, data_points: Union[List[DataPoint], DataPoint]) -> torch.tensor:
"""Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
raise NotImplementedError
@abstractmethod
def evaluate(
self,
sentences: Union[List[Sentence], Dataset],
gold_label_type: str,
out_path: Union[str, Path] = None,
embedding_storage_mode: str = "none",
mini_batch_size: int = 32,
num_workers: int = 8,
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
exclude_labels: List[str] = [],
gold_label_dictionary: Optional[Dictionary] = None,
) -> Result:
"""Evaluates the model. Returns a Result object containing evaluation
results and a loss value. Implement this to enable evaluation.
        :param sentences: the sentences or Dataset to run evaluation on
:param out_path: Optional output path to store predictions
:param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
:return: Returns a Tuple consisting of a Result object and a loss float value
"""
raise NotImplementedError
@abstractmethod
def _get_state_dict(self):
"""Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
functionality."""
raise NotImplementedError
@staticmethod
@abstractmethod
def _init_model_with_state_dict(state):
"""Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
functionality."""
raise NotImplementedError
@staticmethod
def _fetch_model(model_name) -> str:
return model_name
def save(self, model_file: Union[str, Path], checkpoint: bool = False):
"""
Saves the current model to the provided file.
:param model_file: the model file
"""
model_state = self._get_state_dict()
# in Flair <0.9.1, optimizer and scheduler used to train model are not saved
optimizer = scheduler = None
# write out a "model card" if one is set
if hasattr(self, 'model_card'):
# special handling for optimizer: remember optimizer class and state dictionary
if 'training_parameters' in self.model_card:
training_parameters = self.model_card['training_parameters']
if 'optimizer' in training_parameters:
optimizer = training_parameters['optimizer']
if checkpoint:
training_parameters['optimizer_state_dict'] = optimizer.state_dict()
training_parameters['optimizer'] = optimizer.__class__
if 'scheduler' in training_parameters:
scheduler = training_parameters['scheduler']
if checkpoint:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
training_parameters['scheduler_state_dict'] = scheduler.state_dict()
training_parameters['scheduler'] = scheduler.__class__
model_state['model_card'] = self.model_card
# save model
torch.save(model_state, str(model_file), pickle_protocol=4)
# restore optimizer and scheduler to model card if set
if optimizer:
self.model_card['training_parameters']['optimizer'] = optimizer
if scheduler:
self.model_card['training_parameters']['scheduler'] = scheduler
@classmethod
def load(cls, model: Union[str, Path]):
"""
Loads the model from the given file.
:param model: the model file
:return: the loaded text classifier model
"""
model_file = cls._fetch_model(str(model))
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
# see https://github.com/zalandoresearch/flair/issues/351
f = file_utils.load_big_file(str(model_file))
state = torch.load(f, map_location='cpu')
model = cls._init_model_with_state_dict(state)
if 'model_card' in state:
model.model_card = state['model_card']
model.eval()
model.to(flair.device)
return model
def print_model_card(self):
if hasattr(self, 'model_card'):
param_out = "\n------------------------------------\n"
param_out += "--------- Flair Model Card ---------\n"
param_out += "------------------------------------\n"
param_out += "- this Flair model was trained with:\n"
param_out += f"-- Flair version {self.model_card['flair_version']}\n"
param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
if 'transformers_version' in self.model_card:
param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
param_out += "------------------------------------\n"
param_out += "------- Training Parameters: -------\n"
param_out += "------------------------------------\n"
training_params = '\n'.join(f'-- {param} = {self.model_card["training_parameters"][param]}'
for param in self.model_card['training_parameters'])
param_out += training_params + "\n"
param_out += "------------------------------------\n"
log.info(param_out)
else:
log.info(
"This model has no model card (likely because it is not yet trained or was trained with Flair version < 0.9.1)")
class Classifier(Model):
"""Abstract base class for all Flair models that do classification, both single- and multi-label.
It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models
use the same evaluation routines and compute the same numbers.
Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair
implement the DefaultClassifier base class which implements Classifier."""
def evaluate(
self,
data_points: Union[List[DataPoint], Dataset],
gold_label_type: str,
out_path: Union[str, Path] = None,
embedding_storage_mode: str = "none",
mini_batch_size: int = 32,
num_workers: int = 8,
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
exclude_labels: List[str] = [],
gold_label_dictionary: Optional[Dictionary] = None,
) -> Result:
import numpy as np
import sklearn
# read Dataset into data loader (if list of sentences passed, make Dataset first)
if not isinstance(data_points, Dataset):
data_points = SentenceDataset(data_points)
data_loader = DataLoader(data_points, batch_size=mini_batch_size, num_workers=num_workers)
with torch.no_grad():
# loss calculation
eval_loss = 0
average_over = 0
# variables for printing
lines: List[str] = []
# variables for computing scores
all_spans: List[str] = []
all_true_values = {}
all_predicted_values = {}
sentence_id = 0
for batch in data_loader:
# remove any previously predicted labels
for datapoint in batch:
datapoint.remove_labels('predicted')
# predict for batch
loss_and_count = self.predict(batch,
embedding_storage_mode=embedding_storage_mode,
mini_batch_size=mini_batch_size,
label_name='predicted',
return_loss=True)
if isinstance(loss_and_count, Tuple):
average_over += loss_and_count[1]
eval_loss += loss_and_count[0]
else:
eval_loss += loss_and_count
# get the gold labels
for datapoint in batch:
for gold_label in datapoint.get_labels(gold_label_type):
representation = str(sentence_id) + ': ' + gold_label.identifier
value = gold_label.value
if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
value = '<unk>'
if representation not in all_true_values:
all_true_values[representation] = [value]
else:
all_true_values[representation].append(value)
if representation not in all_spans:
all_spans.append(representation)
for predicted_span in datapoint.get_labels("predicted"):
representation = str(sentence_id) + ': ' + predicted_span.identifier
# add to all_predicted_values
if representation not in all_predicted_values:
all_predicted_values[representation] = [predicted_span.value]
else:
all_predicted_values[representation].append(predicted_span.value)
if representation not in all_spans:
all_spans.append(representation)
sentence_id += 1
store_embeddings(batch, embedding_storage_mode)
# make printout lines
if out_path:
lines.extend(self._print_predictions(batch, gold_label_type))
# write all_predicted_values to out_file if set
if out_path:
with open(Path(out_path), "w", encoding="utf-8") as outfile:
outfile.write("".join(lines))
# make the evaluation dictionary
evaluation_label_dictionary = Dictionary(add_unk=False)
evaluation_label_dictionary.add_item("O")
for true_values in all_true_values.values():
for label in true_values:
evaluation_label_dictionary.add_item(label)
for predicted_values in all_predicted_values.values():
for label in predicted_values:
evaluation_label_dictionary.add_item(label)
# finally, compute numbers
y_true = []
y_pred = []
for span in all_spans:
true_values = all_true_values[span] if span in all_true_values else ['O']
predicted_values = all_predicted_values[span] if span in all_predicted_values else ['O']
y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
for true_value in true_values:
y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
y_true.append(y_true_instance.tolist())
y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
for predicted_value in predicted_values:
y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
y_pred.append(y_pred_instance.tolist())
# now, calculate evaluation numbers
target_names = []
labels = []
counter = Counter()
counter.update(list(itertools.chain.from_iterable(all_true_values.values())))
counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))
for label_name, count in counter.most_common():
if label_name == 'O': continue
if label_name in exclude_labels: continue
target_names.append(label_name)
labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))
# there is at least one gold label or one prediction (default)
if len(all_true_values) + len(all_predicted_values) > 1:
classification_report = sklearn.metrics.classification_report(
y_true, y_pred, digits=4, target_names=target_names, zero_division=0, labels=labels,
)
classification_report_dict = sklearn.metrics.classification_report(
y_true, y_pred, target_names=target_names, zero_division=0, output_dict=True, labels=labels,
)
accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)
precision_score = round(classification_report_dict["micro avg"]["precision"], 4)
recall_score = round(classification_report_dict["micro avg"]["recall"], 4)
micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)
main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]
else:
# issue error and default all evaluation numbers to 0.
log.error(
"ACHTUNG! No gold labels and no all_predicted_values found! Could be an error in your corpus or how you "
"initialize the trainer!")
accuracy_score = precision_score = recall_score = micro_f_score = macro_f_score = main_score = 0.
classification_report = ""
classification_report_dict = {}
detailed_result = (
"\nResults:"
f"\n- F-score (micro) {micro_f_score}"
f"\n- F-score (macro) {macro_f_score}"
f"\n- Accuracy {accuracy_score}"
"\n\nBy class:\n" + classification_report
)
# line for log file
log_header = "PRECISION\tRECALL\tF1\tACCURACY"
log_line = f"{precision_score}\t" f"{recall_score}\t" f"{micro_f_score}\t" f"{accuracy_score}"
if average_over > 0:
eval_loss /= average_over
result = Result(
main_score=main_score,
log_line=log_line,
log_header=log_header,
detailed_results=detailed_result,
classification_report=classification_report_dict,
loss=eval_loss
)
return result
def _print_predictions(self, batch, gold_label_type):
lines = []
for datapoint in batch:
# check if there is a label mismatch
g = [label.identifier + label.value for label in datapoint.get_labels(gold_label_type)]
p = [label.identifier + label.value for label in datapoint.get_labels('predicted')]
g.sort()
p.sort()
correct_string = " -> MISMATCH!\n" if g != p else ""
# print info
eval_line = f"{datapoint.to_original_text()}\n" \
f" - Gold: {datapoint.get_labels(gold_label_type)}\n" \
f" - Pred: {datapoint.get_labels('predicted')}\n{correct_string}\n"
lines.append(eval_line)
return lines
class DefaultClassifier(Classifier):
"""Default base class for all Flair models that do classification, both single- and multi-label.
It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers
are implemented here, including the loss calculation and the predict() method.
Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement
this class. You only need to implement the forward_pass() method to implement this base class.
"""
def forward_pass(self,
sentences: Union[List[DataPoint], DataPoint],
return_label_candidates: bool = False,
):
"""This method does a forward pass through the model given a list of data points as input.
Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits
produced by the decoder and labels are the string labels for each data point.
Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,
where data_points are the data points to which labels are added (commonly either Sentence or Token objects)
and candidate_labels are empty Label objects for each prediction (depending on the task Label,
SpanLabel or RelationLabel)."""
raise NotImplementedError
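    # Illustrative sketch (an assumption, not flair's own code): a minimal
    # sentence-level subclass only needs to provide forward_pass(), roughly:
    #
    #     def forward_pass(self, sentences, return_label_candidates=False):
    #         self.embeddings.embed(sentences)   # any flair document embeddings
    #         embedded = torch.stack([s.get_embedding() for s in sentences])
    #         scores = self.decoder(embedded)    # nn.Linear -> len(self.label_dictionary)
    #         labels = [[label.value for label in s.get_labels(self.label_type)]
    #                   for s in sentences]
    #         return scores, labels
    #
    # plus the sentences themselves and one empty candidate label per prediction
    # when return_label_candidates is True, as described in the docstring above.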
def __init__(self,
label_dictionary: Dictionary,
multi_label: bool = False,
multi_label_threshold: float = 0.5,
loss_weights: Dict[str, float] = None,
):
super().__init__()
# initialize the label dictionary
self.label_dictionary: Dictionary = label_dictionary
# set up multi-label logic
self.multi_label = multi_label
self.multi_label_threshold = multi_label_threshold
# loss weights and loss function
self.weight_dict = loss_weights
# Initialize the weight tensor
if loss_weights is not None:
n_classes = len(self.label_dictionary)
weight_list = [1.0 for i in range(n_classes)]
for i, tag in enumerate(self.label_dictionary.get_items()):
if tag in loss_weights.keys():
weight_list[i] = loss_weights[tag]
self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
else:
self.loss_weights = None
if self.multi_label:
self.loss_function = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights)
else:
self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights)
@property
def multi_label_threshold(self):
return self._multi_label_threshold
@multi_label_threshold.setter
def multi_label_threshold(self, x): # setter method
if type(x) is dict:
if 'default' in x:
self._multi_label_threshold = x
else:
raise Exception('multi_label_threshold dict should have a "default" key')
else:
self._multi_label_threshold = {'default': x}
def forward_loss(self, sentences: Union[List[DataPoint], DataPoint]) -> torch.tensor:
scores, labels = self.forward_pass(sentences)
return self._calculate_loss(scores, labels)
def _calculate_loss(self, scores, labels):
if not any(labels): return torch.tensor(0., requires_grad=True, device=flair.device), 1
if self.multi_label:
labels = torch.tensor([[1 if l in all_labels_for_point else 0 for l in self.label_dictionary.get_items()]
for all_labels_for_point in labels], dtype=torch.float, device=flair.device)
else:
labels = torch.tensor([self.label_dictionary.get_idx_for_item(label[0]) if len(label) > 0
else self.label_dictionary.get_idx_for_item('O')
for label in labels], dtype=torch.long, device=flair.device)
return self.loss_function(scores, labels), len(labels)
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size: int = 32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
):
"""
Predicts the class labels for the given sentences. The labels are directly added to the sentences.
:param sentences: list of sentences
:param mini_batch_size: mini batch size to use
:param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
"""
if label_name is None:
label_name = self.label_type if self.label_type is not None else "label"
with torch.no_grad():
if not sentences:
return sentences
if isinstance(sentences, DataPoint):
sentences = [sentences]
# filter empty sentences
if isinstance(sentences[0], DataPoint):
sentences = [sentence for sentence in sentences if len(sentence) > 0]
if len(sentences) == 0:
return sentences
# reverse sort all sequences by their length
rev_order_len_index = sorted(range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True)
reordered_sentences: List[Union[DataPoint, str]] = [sentences[index] for index in rev_order_len_index]
dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size)
# progress bar for verbosity
if verbose:
dataloader = tqdm(dataloader)
overall_loss = 0
batch_no = 0
label_count = 0
for batch in dataloader:
batch_no += 1
if verbose:
dataloader.set_description(f"Inferencing on batch {batch_no}")
# stop if all sentences are empty
if not batch:
continue
scores, gold_labels, data_points, label_candidates = self.forward_pass(batch,
return_label_candidates=True)
# remove previously predicted labels of this type
for sentence in data_points:
sentence.remove_labels(label_name)
if return_loss:
overall_loss += self._calculate_loss(scores, gold_labels)[0]
label_count += len(label_candidates)
# if anything could possibly be predicted
if len(label_candidates) > 0:
if self.multi_label:
sigmoided = torch.sigmoid(scores) # size: (n_sentences, n_classes)
n_labels = sigmoided.size(1)
for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
for l_idx in range(n_labels):
label_value = self.label_dictionary.get_item_for_index(l_idx)
if label_value == 'O': continue
label_threshold = self._get_label_threshold(label_value)
label_score = sigmoided[s_idx, l_idx].item()
if label_score > label_threshold or return_probabilities_for_all_classes:
label = label_candidate.spawn(value=label_value, score=label_score)
data_point.add_complex_label(label_name, label)
else:
softmax = torch.nn.functional.softmax(scores, dim=-1)
if return_probabilities_for_all_classes:
n_labels = softmax.size(1)
for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
for l_idx in range(n_labels):
label_value = self.label_dictionary.get_item_for_index(l_idx)
if label_value == 'O': continue
label_score = softmax[s_idx, l_idx].item()
label = label_candidate.spawn(value=label_value, score=label_score)
data_point.add_complex_label(label_name, label)
else:
conf, idx = torch.max(softmax, dim=-1)
for data_point, label_candidate, c, i in zip(data_points, label_candidates, conf, idx):
label_value = self.label_dictionary.get_item_for_index(i.item())
if label_value == 'O': continue
label = label_candidate.spawn(value=label_value, score=c.item())
data_point.add_complex_label(label_name, label)
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, label_count
def _get_label_threshold(self, label_value):
label_threshold = self.multi_label_threshold['default']
if label_value in self.multi_label_threshold:
label_threshold = self.multi_label_threshold[label_value]
return label_threshold
def __str__(self):
return super(flair.nn.Model, self).__str__().rstrip(')') + \
f' (weights): {self.weight_dict}\n' + \
f' (weight_tensor) {self.loss_weights}\n)'
| 45.285953 | 128 | 0.598907 | [
"MIT"
] | MaxDall/flair | flair/nn/model.py | 27,081 | Python |
#!/usr/bin/python
import sys
import json
import numpy as np
import cv2
import zmq
import time
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.optimizers import sgd
from os import listdir
from os.path import isfile, join
#-- Constants
imageSize = (128, 128)
hidden_size=20000
dataset_root_dir = "./dataset"
network_income_port = 9000
network_delivery_port = 9001
network_protocol = "tcp"
network_masked_ip = '127.0.0'#"192.168.14"
#-- Functions
def recieveImage(listener):
rc = listener.recv()
buf = buffer(rc)
rc = np.frombuffer(buf, dtype='uint8')
rc = list(rc)
rc = np.reshape(rc, (128, 128))
rc = rc.astype('uint8')
return rc
#-- Main Function
if __name__ == "__main__":
brain = Brain()
if (len(sys.argv) > 1 and sys.argv[1] == 'train'):
print "running in train mode"
if (len(sys.argv) > 2):
filename = sys.argv[2]
else:
filename = "model"
print "model : ", filename
brain.loadData()
brain.train()
brain.save('')
elif len(sys.argv) > 1 and sys.argv[1] == 'help':
print "Usage: " + sys.argv[0] + " [train | test | socket | collect] [model_name]\n"
elif len(sys.argv) > 1 and sys.argv[1] == 'collect':
print "runing in collection mode"
ctx = zmq.Context.instance()
listener = ctx.socket(zmq.REP)
listener.connect("{0}://{1}.{2}:{3}".format(network_protocol, network_masked_ip, '1', network_income_port))
#if (len(sys.argv) > 2):
# filename = sys.argv[2]
#else:
# filename = "model"
#brain.load(filename)
print "socket ready"
#listener.setsockopt(zmq.SUBSCRIBE, b'')
r = 0
while True:
# Recieveing Data
rc = recieveImage(listener);
rc = cv2.resize(rc, (300, 300), interpolation=0)
# Save recieved data
p = './data/frame_' + str(time.time()) + ".png"
cv2.imwrite(p, rc)
# Send responce
p = str(r)
listener.send_string(p)
cv2.imshow("img", rc)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
r = 1 - r
listener.close(linger=0)
ctx.term()
elif len(sys.argv) > 1 and sys.argv[1] == 'socket':
print "runing in socket mode"
ctx = zmq.Context.instance()
listener = ctx.socket(zmq.REP)
listener.connect("{0}://{1}.{2}:{3}".format(network_protocol, network_masked_ip, '1', network_income_port))
#if (len(sys.argv) > 2):
# filename = sys.argv[2]
#else:
# filename = "model"
#brain.load(filename)
print "socket ready"
#listener.setsockopt(zmq.SUBSCRIBE, b'')
r = 0
while True:
rc = recieveImage(listener);
rc = cv2.resize(rc, (300, 300), interpolation=0)
cv2.imshow("img", rc)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
listener.send(r)
r = 1 - r
#print brain.predict()
listener.close(linger=0)
ctx.term()
else:
print "runing in default mode"
if (len(sys.argv) > 1):
filename = sys.argv[1]
else:
filename = "model"
print "model : ", filename
brain.load(filename)
print brain.predict()
| 18.666667 | 109 | 0.636792 | [
"MIT"
] | ArefMq/SoccerBallDetection | src/modules/bd.py | 2,968 | Python |
import unittest
import pytest
import numpy as np
from deepchem.utils.molecule_graph import MoleculeGraphData, BatchMoleculeGraphData
class TestMoleculeGraph(unittest.TestCase):
def test_molecule_graph_data(self):
num_nodes, num_node_features = 4, 32
num_edges, num_edge_features = 6, 32
node_features = np.random.random_sample((num_nodes, num_node_features))
edge_features = np.random.random_sample((num_edges, num_edge_features))
targets = np.random.random_sample(5)
edge_index = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
])
graph_features = None
mol_graph = MoleculeGraphData(
node_features=node_features,
edge_index=edge_index,
targets=targets,
edge_features=edge_features,
graph_features=graph_features)
assert mol_graph.num_nodes == num_nodes
assert mol_graph.num_node_features == num_node_features
assert mol_graph.num_edges == num_edges
assert mol_graph.num_edge_features == num_edge_features
assert mol_graph.targets.shape == (5,)
def test_invalid_molecule_graph_data(self):
with pytest.raises(ValueError):
invalid_node_features_type = list(np.random.random_sample((5, 5)))
edge_index = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
])
targets = np.random.random_sample(5)
mol_graph = MoleculeGraphData(
node_features=invalid_node_features_type,
edge_index=edge_index,
targets=targets,
)
with pytest.raises(ValueError):
node_features = np.random.random_sample((5, 5))
invalid_edge_index_shape = np.array([
[0, 1, 2, 2, 3, 4],
[1, 2, 0, 3, 4, 0],
[2, 2, 1, 4, 0, 3],
])
targets = np.random.random_sample(5)
mol_graph = MoleculeGraphData(
node_features=node_features,
edge_index=invalid_edge_index_shape,
targets=targets,
)
with pytest.raises(TypeError):
node_features = np.random.random_sample((5, 5))
mol_graph = MoleculeGraphData(node_features=node_features)
def test_batch_molecule_graph_data(self):
num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]
num_node_features, num_edge_features = 32, 32
edge_index_list = [
np.array([[0, 1], [1, 2]]),
np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),
np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])
]
targets = np.random.random_sample(5)
molecule_graphs = [
MoleculeGraphData(
node_features=np.random.random_sample((num_nodes_list[i],
num_node_features)),
edge_index=edge_index_list[i],
targets=targets,
edge_features=np.random.random_sample((num_edge_list[i],
num_edge_features)),
graph_features=None) for i in range(len(num_edge_list))
]
batch = BatchMoleculeGraphData(molecule_graphs)
assert batch.num_nodes == sum(num_nodes_list)
assert batch.num_node_features == num_node_features
assert batch.num_edges == sum(num_edge_list)
assert batch.num_edge_features == num_edge_features
assert batch.targets.shape == (3, 5)
assert batch.graph_index.shape == (sum(num_nodes_list),)
| 35.308511 | 83 | 0.637843 | [
"MIT"
] | cpfpengfei/deepchem | deepchem/utils/test/test_molecule_graph.py | 3,319 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Database(pulumi.CustomResource):
charset: pulumi.Output[str]
collation: pulumi.Output[str]
instance: pulumi.Output[str]
name: pulumi.Output[str]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
def __init__(__self__, resource_name, opts=None, charset=None, collation=None, instance=None, name=None, project=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Database resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['charset'] = charset
__props__['collation'] = collation
if instance is None:
raise TypeError("Missing required property 'instance'")
__props__['instance'] = instance
__props__['name'] = name
__props__['project'] = project
__props__['self_link'] = None
super(Database, __self__).__init__(
'gcp:sql/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, charset=None, collation=None, instance=None, name=None, project=None, self_link=None):
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["charset"] = charset
__props__["collation"] = collation
__props__["instance"] = instance
__props__["name"] = name
__props__["project"] = project
__props__["self_link"] = self_link
return Database(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.717172 | 169 | 0.663655 | [
"ECL-2.0",
"Apache-2.0"
] | 23doors/pulumi-gcp | sdk/python/pulumi_gcp/sql/database.py | 4,427 | Python |
"""
Django settings for django_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [""]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_app']
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "/var/www/django_app/django_app/static/"
| 25.791667 | 91 | 0.697577 | [
"CC0-1.0"
] | jorgeassis/darwinCoreGUI | django_app/settings.py | 3,095 | Python |
# -*- coding: utf-8 -*-
SET_SUGGESTIONS = '='
| 11.75 | 23 | 0.531915 | [
"Apache-2.0"
] | qazbnm456/VWGen | core/shell/shellSuggestion.py | 47 | Python |
#!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import build
build.run(True, True, True)
| 12 | 30 | 0.75 | [
"Apache-2.0"
] | TeamASM-Blur/Sonic-3-Blue-Balls-Edition | Working Disassembly/Build Scripts/buildAndVerify.py | 108 | Python |
# Copyright 2017 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setuptools-based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
long_description = "Google Genomics Protos for Python."
setup(
name='genomics_protos',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description=long_description,
long_description=long_description,
# The project's main homepage.
url='',
# Author details
author='Thomas Colthurst, Jean-Philippe Martin',
author_email='[email protected], [email protected]',
# Choose your license
license='Apache Software License',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='Genomics protos',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["googleapis-common-protos"],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': [],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'sample': [],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# to be able to combine with other google packages
namespace_packages=[
'google', 'google.genomics',
'google.genomics.v1'
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| 33.960938 | 94 | 0.68484 | [
"Apache-2.0"
] | google/genomics-protos | setup.py | 4,347 | Python |
# This sample verifies that the exception type validation
# handles the case where the exception type is a Type[X] object.
from typing import Type
exc: Type[Exception] = Exception
try:
1 / 0
except exc:
print("exc")
| 18.461538 | 65 | 0.683333 | [
"MIT"
] | MerHS/pytea | packages/pyright-internal/src/tests/samples/tryExcept3.py | 240 | Python |
import argparse
import copy
import os
import pickle
import random
import sys
from types import SimpleNamespace
import numpy as np
from domains.npuzzle import NPuzzle, macros
from experiments import search, iw, bfws
def parse_args():
"""Parse input arguments
Use --help to see a pretty description of the arguments
"""
if 'ipykernel' in sys.argv[0]:
sys.argv = [sys.argv[0]]
parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int, default=15, choices=[8, 15, 24, 35, 48, 63, 80],
help='Number of tiles')
parser.add_argument('--random_seed','-s', type=int, default=1,
help='Seed to use for RNGs')
parser.add_argument('--macro_type','-m', type=str, default='primitive',
choices=['primitive','random','learned'],
help='Type of macro_list to consider during search')
parser.add_argument('--search_alg', type=str, default='gbfs',
choices = ['astar', 'gbfs', 'weighted_astar','bfws_r0', 'bfws_rg'],
help='Search algorithm to run')
parser.add_argument('--g_weight', type=float, default=None,
help='Weight for g-score in weighted A*')
parser.add_argument('--h_weight', type=float, default=None,
help='Weight for h-score in weighted A*')
parser.add_argument('--random_goal','-r', action='store_true', default=False,
help='Generate a random goal instead of the default solve configuration')
parser.add_argument('--max_transitions', type=lambda x: int(float(x)), default=5e5,
help='Maximum number of state transitions')
parser.add_argument('--bfws_precision', type=int, default=3,
help='The number of width values, w \in {1,...,P}, to use when the search algorithm is best-first width search')
return parser.parse_args()
def solve():
"""Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm"""
args = parse_args()
#
# Set up the scramble
random.seed(args.random_seed)
np.random.seed(args.random_seed)
start = NPuzzle(n=args.n).scramble(seed=args.random_seed)
if args.random_goal:
goal = NPuzzle(n=args.n).scramble(seed=args.random_seed+1000)
print('Using goal pattern: {:03d}'.format(args.random_seed+1000))
else:
goal = NPuzzle(n=args.n)
print('Using seed: {:03d}'.format(args.random_seed))
print('Start:', start)
print('Goal:', goal)
print('Start:', ' '.join(map(str,list(start))))
print('Goal: ', ' '.join(map(str,list(goal))))
# Define the macros / models
if args.macro_type == 'random':
macros.generate_random_macro_set(args.random_seed)
macro_namespace = {
'primitive': SimpleNamespace(macros=[], models=[]),
'random': macros.random,
'learned': macros.learned,
}[args.macro_type]
macro_list = macro_namespace.macros
model_list = macro_namespace.models
# Set up the search problem
search_fn = {
'astar': search.astar,
'gbfs': search.gbfs,
'weighted_astar': search.weighted_astar,
'bfws_r0': bfws.bfws,
'bfws_rg': bfws.bfws,
}[args.search_alg]
def get_successors(puz):
successors = [(copy.deepcopy(puz).transition(a), [a]) for a in puz.actions()]
if args.macro_type != 'primitive':
valid_macros = macro_list[puz.blank_idx]
valid_models = model_list[puz.blank_idx]
macro_successors = [(copy.deepcopy(puz).apply_macro(model=model), macro)
for (macro, model) in zip(valid_macros, valid_models)]
successors += macro_successors
return successors
search_dict = {
'start': start,
'is_goal': lambda node: node.state == goal,
'step_cost': lambda macro: 1,
'heuristic': lambda puz: len(puz.summarize_effects(baseline=goal)[0]),
'get_successors': get_successors,
'max_transitions': args.max_transitions,
}
if args.search_alg == 'weighted_astar':
assert (args.g_weight is not None
and args.h_weight is not None), 'Must specify weights if using weighted A*.'
gh_weights = (args.g_weight, args.h_weight)
search_dict['gh_weights'] = gh_weights
if 'bfws' in args.search_alg:
search_dict['precision'] = args.bfws_precision
if args.search_alg == 'bfws_rg':
goal_fns = [(lambda x, i=i: x.state[i] == goal[i]) for i, _ in enumerate(goal)]
relevant_atoms = iw.iw(1, start, get_successors, goal_fns)
if not relevant_atoms:
relevant_atoms = iw.iw(2, start, get_successors, goal_fns)
if not relevant_atoms:
relevant_atoms = start.all_atoms()
search_dict['R'] = relevant_atoms
#%% Run the search
search_results = search_fn(**search_dict)
#%% Save the results
tag = '{}-puzzle/'.format(args.n)
if args.random_goal:
tag += 'random_goal/'
else:
tag += 'default_goal/'
tag += args.macro_type
results_dir = 'results/npuzzle/{}/{}/'.format(args.search_alg,tag)
os.makedirs(results_dir, exist_ok=True)
with open(results_dir+'seed-{:03d}.pickle'.format(args.random_seed), 'wb') as file:
pickle.dump(search_results, file)
if __name__ == '__main__':
solve()
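# Example invocations (a sketch: run from the repository root so the `domains` and
# `experiments` packages are importable; the 'learned' macro type additionally needs
# the macro files shipped with the repository):
#
#   python -m experiments.npuzzle.solve -n 15 -s 1 --search_alg gbfs
#   python -m experiments.npuzzle.solve -n 15 -s 2 --search_alg weighted_astar \
#       --g_weight 1.0 --h_weight 2.0 --macro_type learned
#   python -m experiments.npuzzle.solve -n 15 -s 3 --search_alg bfws_rg --random_goal
#
# Each run pickles its search results under results/npuzzle/<search_alg>/<tag>/.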
| 37.930556 | 136 | 0.622483 | [
"ECL-2.0",
"Apache-2.0"
] | camall3n/focused-macros | experiments/npuzzle/solve.py | 5,462 | Python |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe.utils import formatdate, fmt_money, flt, cstr, cint, format_datetime, format_time, format_duration
from frappe.model.meta import get_field_currency, get_field_precision
import re
from six import string_types
def format_value(value, df=None, doc=None, currency=None, translated=False):
'''Format value based on given fieldtype, document reference, currency reference.
	If docfield info (df) is not given, it will try to guess the fieldtype based on the datatype of the value'''
if isinstance(df, string_types):
df = frappe._dict(fieldtype=df)
if not df:
df = frappe._dict()
if isinstance(value, datetime.datetime):
df.fieldtype = 'Datetime'
elif isinstance(value, datetime.date):
df.fieldtype = 'Date'
elif isinstance(value, datetime.timedelta):
df.fieldtype = 'Time'
elif isinstance(value, int):
df.fieldtype = 'Int'
elif isinstance(value, float):
df.fieldtype = 'Float'
else:
df.fieldtype = 'Data'
elif (isinstance(df, dict)):
# Convert dict to object if necessary
df = frappe._dict(df)
if value is None:
value = ""
elif translated:
value = frappe._(value)
if not df:
return value
elif df.get("fieldtype")=="Date":
return formatdate(value)
elif df.get("fieldtype")=="Datetime":
return format_datetime(value)
elif df.get("fieldtype")=="Time":
return format_time(value)
elif value==0 and df.get("fieldtype") in ("Int", "Float", "Currency", "Percent") and df.get("print_hide_if_no_value"):
# this is required to show 0 as blank in table columns
return ""
elif df.get("fieldtype") == "Currency":
default_currency = frappe.db.get_default("currency")
currency = currency or get_field_currency(df, doc) or default_currency
return fmt_money(value, precision=get_field_precision(df, doc), currency=currency)
elif df.get("fieldtype") == "Float":
precision = get_field_precision(df, doc)
# I don't know why we support currency option for float
currency = currency or get_field_currency(df, doc)
# show 1.000000 as 1
		# options should not be specified
if not df.options and value is not None:
temp = cstr(value).split(".")
if len(temp)==1 or cint(temp[1])==0:
precision = 0
return fmt_money(value, precision=precision, currency=currency)
elif df.get("fieldtype") == "Percent":
return "{}%".format(flt(value, 2))
elif df.get("fieldtype") in ("Text", "Small Text"):
if not re.search(r"(<br|<div|<p)", value):
return frappe.safe_decode(value).replace("\n", "<br>")
elif df.get("fieldtype") == "Markdown Editor":
return frappe.utils.markdown(value)
elif df.get("fieldtype") == "Table MultiSelect":
meta = frappe.get_meta(df.options)
link_field = [df for df in meta.fields if df.fieldtype == 'Link'][0]
values = [v.get(link_field.fieldname, 'asdf') for v in value]
return ', '.join(values)
elif df.get("fieldtype") == "Duration":
hide_days = df.hide_days
return format_duration(value, hide_days)
elif df.get("fieldtype") == "Text Editor":
return "<div class='ql-snow'>{}</div>".format(value)
return value
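# A minimal, site-independent sketch of how format_value behaves. Fieldtypes such as
# Currency, Date or Table MultiSelect additionally need an initialized frappe site
# (database defaults, user formats), so only site-independent cases are shown here.
def _format_value_examples():
    percent = format_value(0.5, "Percent")            # -> "0.5%"
    small_text = format_value("a\nb", "Small Text")   # -> "a<br>b"
    guessed = format_value(42)                        # guessed as Int, returned as-is
    return percent, small_text, guessed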
| 31.70297 | 119 | 0.710181 | [
"MIT"
] | EHASUN/frappe | frappe/utils/formatters.py | 3,202 | Python |
import re
from videos_id.platform import Platform
class Vimeo(Platform):
def __init__(self):
self.platform = "Vimeo"
def check_url(self, url):
pattern = r'https?:\/\/(?:www\.|player\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/(?:[^\/]*)\/videos\/|album\/(?:\d+)\/video\/|video\/|)(\d+)(?:$|\/|\?)'
match = re.search(pattern, url, re.IGNORECASE)
if match:
return match.group(1)
else:
return None | 27.823529 | 164 | 0.534884 | [
"MIT"
] | RentFreeMedia/python-video-ids | videos_id/provider/vimeo.py | 473 | Python |
import FWCore.ParameterSet.Config as cms
from RecoEgamma.PhotonIdentification.isolationCalculator_cfi import *
from RecoEgamma.PhotonIdentification.mipVariable_cfi import *
from RecoEcal.EgammaClusterProducers.hybridSuperClusters_cfi import *
from RecoEcal.EgammaClusterProducers.multi5x5BasicClusters_cfi import *
#
# producer for photons
#
photons = cms.EDProducer("GEDPhotonProducer",
photonProducer = cms.InputTag("photonCore"),
reconstructionStep = cms.string("tmp"),
outputPhotonCollection = cms.string(""),
pfEgammaCandidates = cms.InputTag(""),
valueMapPhotons = cms.string(""),
# photonCollection = cms.string(''),
regressionWeightsFromDB = cms.bool(True),
energyRegressionWeightsFileLocation = cms.string('/afs/cern.ch/user/b/bendavid/cmspublic/regweights/gbrph.root'),
energyRegressionWeightsDBLocation = cms.string('wgbrph'),
superClusterEnergyCorrFunction = cms.string("EcalClusterEnergyCorrection"),
superClusterEnergyErrorFunction = cms.string("EcalClusterEnergyUncertainty"),
superClusterCrackEnergyCorrFunction = cms.string("EcalClusterCrackCorrection"),
photonEcalEnergyCorrFunction = cms.string("EcalClusterEnergyCorrectionObjectSpecific"),
#candidateP4type = cms.string("fromRegression"),
candidateP4type = cms.string("fromEcalEnergy"),
isolationSumsCalculatorSet = cms.PSet(isolationSumsCalculator),
mipVariableSet = cms.PSet(mipVariable),
usePrimaryVertex = cms.bool(True),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
posCalc_t0_endcPresh = cms.double(3.6),
posCalc_logweight = cms.bool(True),
posCalc_w0 = cms.double(4.2),
hbheInstance = cms.string(''),
posCalc_t0_endc = cms.double(6.3),
barrelEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
hbheModule = cms.string('hbhereco'),
endcapEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
preshowerHits = cms.InputTag("ecalPreshowerRecHit","EcalRecHitsES"),
hcalTowers = cms.InputTag("towerMaker"),
runMIPTagger = cms.bool(True),
highEt = cms.double(100.),
minR9Barrel = cms.double(0.94),
minR9Endcap = cms.double(0.95),
hOverEConeSize = cms.double(0.15),
posCalc_x0 = cms.double(0.89),
posCalc_t0_barl = cms.double(7.7),
minSCEtBarrel = cms.double(10.0),
minSCEtEndcap = cms.double(10.0),
maxHoverEBarrel = cms.double(0.5),
maxHoverEEndcap = cms.double(0.5),
ecalRecHitSumEtOffsetBarrel = cms.double(999999999),
ecalRecHitSumEtSlopeBarrel = cms.double(0.),
ecalRecHitSumEtOffsetEndcap = cms.double(999999999),
ecalRecHitSumEtSlopeEndcap = cms.double(0.),
hcalTowerSumEtOffsetBarrel = cms.double(999999999),
hcalTowerSumEtSlopeBarrel = cms.double(0.),
hcalTowerSumEtOffsetEndcap = cms.double(999999999),
hcalTowerSumEtSlopeEndcap = cms.double(0.),
nTrackSolidConeBarrel =cms.double(999999999),
nTrackSolidConeEndcap =cms.double(999999999),
nTrackHollowConeBarrel =cms.double(999999999),
nTrackHollowConeEndcap =cms.double(999999999),
trackPtSumSolidConeBarrel =cms.double(999999999),
trackPtSumSolidConeEndcap =cms.double(999999999),
trackPtSumHollowConeBarrel =cms.double(999999999),
trackPtSumHollowConeEndcap =cms.double(999999999),
sigmaIetaIetaCutBarrel=cms.double(999999999),
sigmaIetaIetaCutEndcap=cms.double(999999999),
posCalcParameters = cms.PSet( T0_barl = cms.double(7.4),
T0_endc = cms.double(6.3),
T0_endcPresh = cms.double(3.6),
LogWeighted = cms.bool(True),
W0 = cms.double(4.2),
X0 = cms.double(0.89)
),
RecHitFlagToBeExcludedEB = cleanedHybridSuperClusters.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEB = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
RecHitFlagToBeExcludedEE = multi5x5BasicClustersCleaned.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEE = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
checkHcalStatus = cms.bool(True),
)
photonsFromMultiCl = photons.clone(
photonProducer = 'photonCoreFromMultiCl'
)
islandPhotons = cms.EDProducer("PhotonProducer",
photonCoreProducer = cms.InputTag("islandPhotonCore"),
regressionWeightsFromDB = cms.bool(True),
energyRegressionWeightsFileLocation = cms.string('/afs/cern.ch/user/b/bendavid/cmspublic/regweights/gbrph.root'),
energyRegressionWeightsDBLocation = cms.string('wgbrph'),
superClusterEnergyCorrFunction = cms.string("EcalClusterEnergyCorrection"),
superClusterEnergyErrorFunction = cms.string("EcalClusterEnergyUncertainty"),
superClusterCrackEnergyCorrFunction = cms.string("EcalClusterCrackCorrection"),
photonEcalEnergyCorrFunction = cms.string("EcalClusterEnergyCorrectionObjectSpecific"),
candidateP4type = cms.string("fromEcalEnergy"),
isolationSumsCalculatorSet = cms.PSet(isolationSumsCalculator),
mipVariableSet = cms.PSet(mipVariable),
usePrimaryVertex = cms.bool(True),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
posCalc_t0_endcPresh = cms.double(3.6),
posCalc_logweight = cms.bool(True),
posCalc_w0 = cms.double(4.2),
hbheInstance = cms.string(''),
posCalc_t0_endc = cms.double(6.3),
barrelEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
hbheModule = cms.string('hbhereco'),
endcapEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
hcalTowers = cms.InputTag("towerMaker"),
runMIPTagger = cms.bool(True),
highEt = cms.double(100.),
minR9Barrel = cms.double(10.0),
minR9Endcap = cms.double(10.0),
hOverEConeSize = cms.double(0.15),
posCalc_x0 = cms.double(0.89),
posCalc_t0_barl = cms.double(7.7),
minSCEtBarrel = cms.double(5.0),
minSCEtEndcap = cms.double(15.0),
maxHoverEBarrel = cms.double(0.99),
maxHoverEEndcap = cms.double(0.5),
ecalRecHitSumEtOffsetBarrel = cms.double(999999999),
ecalRecHitSumEtSlopeBarrel = cms.double(0.),
ecalRecHitSumEtOffsetEndcap = cms.double(999999999),
ecalRecHitSumEtSlopeEndcap = cms.double(0.),
hcalTowerSumEtOffsetBarrel = cms.double(999999999),
hcalTowerSumEtSlopeBarrel = cms.double(0.),
hcalTowerSumEtOffsetEndcap = cms.double(999999999),
hcalTowerSumEtSlopeEndcap = cms.double(0.),
nTrackSolidConeBarrel =cms.double(999999999),
nTrackSolidConeEndcap =cms.double(999999999),
nTrackHollowConeBarrel =cms.double(999999999),
nTrackHollowConeEndcap =cms.double(999999999),
trackPtSumSolidConeBarrel =cms.double(999999999),
trackPtSumSolidConeEndcap =cms.double(999999999),
trackPtSumHollowConeBarrel =cms.double(999999999),
trackPtSumHollowConeEndcap =cms.double(999999999),
sigmaIetaIetaCutBarrel=cms.double(999999999),
sigmaIetaIetaCutEndcap=cms.double(999999999),
posCalcParameters = cms.PSet( T0_barl = cms.double(7.4),
T0_endc = cms.double(6.3),
T0_endcPresh = cms.double(3.6),
LogWeighted = cms.bool(True),
W0 = cms.double(4.2),
X0 = cms.double(0.89)
),
RecHitFlagToBeExcludedEB = cleanedHybridSuperClusters.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEB = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
RecHitFlagToBeExcludedEE = multi5x5BasicClustersCleaned.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEE = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
)
| 52.222222 | 123 | 0.694618 | [
"Apache-2.0"
] | Abd-Elrazek/cmssw | RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py | 7,990 | Python |
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation <= len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis') | 27.266129 | 228 | 0.721779 | [
"Apache-2.0"
] | anuragbms/Sales-forecasting-with-RNNs | MetamorphicTests/mutants_of_interest/sales_forecasting_file/257_bug.py | 10,143 | Python |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageEngine, MySQLStoreType
from fate_arch.storage import StorageTableBase
class StorageTable(StorageTableBase):
def __init__(
self,
cur,
con,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
store_type: MySQLStoreType = MySQLStoreType.InnoDB,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.MYSQL,
store_type=store_type,
)
self._cur = cur
self._con = con
def check_address(self):
schema = self.meta.get_schema()
if schema:
sql = "SELECT {},{} FROM {}".format(
schema.get("sid"), schema.get("header"), self._address.name
)
feature_data = self.execute(sql)
for feature in feature_data:
if feature:
break
return True
@staticmethod
def get_meta_header(feature_name_list):
create_features = ""
feature_list = []
feature_size = "varchar(255)"
for feature_name in feature_name_list:
create_features += "{} {},".format(feature_name, feature_size)
feature_list.append(feature_name)
return create_features, feature_list
def _count(self):
sql = "select count(*) from {}".format(self._address.name)
try:
self._cur.execute(sql)
# self.con.commit()
ret = self._cur.fetchall()
count = ret[0][0]
except:
count = 0
return count
def _collect(self, **kwargs) -> list:
id_name, feature_name_list, _ = self._get_id_feature_name()
id_feature_name = [id_name]
id_feature_name.extend(feature_name_list)
sql = "select {} from {}".format(",".join(id_feature_name), self._address.name)
data = self.execute(sql)
for line in data:
feature_list = [str(feature) for feature in list(line[1:])]
yield line[0], self.meta.get_id_delimiter().join(feature_list)
def _put_all(self, kv_list, **kwargs):
id_name, feature_name_list, id_delimiter = self._get_id_feature_name()
feature_sql, feature_list = StorageTable.get_meta_header(feature_name_list)
id_size = "varchar(100)"
create_table = (
"create table if not exists {}({} {} NOT NULL, {} PRIMARY KEY({}))".format(
self._address.name, id_name, id_size, feature_sql, id_name
)
)
self._cur.execute(create_table)
sql = "REPLACE INTO {}({}, {}) VALUES".format(
self._address.name, id_name, ",".join(feature_list)
)
for kv in kv_list:
sql += '("{}", "{}"),'.format(kv[0], '", "'.join(kv[1].split(id_delimiter)))
sql = ",".join(sql.split(",")[:-1]) + ";"
self._cur.execute(sql)
self._con.commit()
def _destroy(self):
sql = "drop table {}".format(self._address.name)
self._cur.execute(sql)
self._con.commit()
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
sql = "create table {}.{} select * from {};".format(namespace, name, self._address.name)
self._cur.execute(sql)
self._con.commit()
def execute(self, sql, select=True):
self._cur.execute(sql)
if select:
while True:
result = self._cur.fetchone()
if result:
yield result
else:
break
else:
result = self._cur.fetchall()
return result
def _get_id_feature_name(self):
id = self.meta.get_schema().get("sid", "id")
header = self.meta.get_schema().get("header")
id_delimiter = self.meta.get_id_delimiter()
if header:
if isinstance(header, str):
feature_list = header.split(id_delimiter)
elif isinstance(header, list):
feature_list = header
else:
feature_list = [header]
else:
raise Exception("mysql table need data header")
return id, feature_list, id_delimiter
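# A small, connection-free sketch of the SQL fragments this table builds.
# get_meta_header is a pure staticmethod; the table and column names below are
# made up for illustration only.
def _example_meta_header():
    create_features, feature_list = StorageTable.get_meta_header(["x0", "x1"])
    # create_features == "x0 varchar(255),x1 varchar(255),"
    # feature_list    == ["x0", "x1"]
    # _put_all would then issue roughly:
    #   create table if not exists t_example(id varchar(100) NOT NULL,
    #       x0 varchar(255),x1 varchar(255), PRIMARY KEY(id))
    #   REPLACE INTO t_example(id, x0,x1) VALUES("k1", "0.1", "0.2");
    return create_features, feature_list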
| 34.958333 | 96 | 0.582439 | [
"Apache-2.0"
] | FutaoJia97/FATE | python/fate_arch/storage/mysql/_table.py | 5,034 | Python |
import requests
from . import FeedSource, _request_headers
# pylint: disable=no-member
class WorldCoinIndex(FeedSource): # Weighted average from WorldCoinIndex
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timeout = getattr(self, 'timeout', 15)
if not hasattr(self, 'api_key'):
raise Exception("WorldCoinIndex FeedSource requires 'api_key'.")
def _fetch(self):
feed = {}
for base in self.bases:
url = "https://www.worldcoinindex.com/apiservice/v2getmarkets?key={apikey}&fiat={base}"
response = requests.get(url=url.format(apikey=self.api_key, base=base),
headers=_request_headers, timeout=self.timeout)
result = response.json()['Markets']
for market in result:
for ticker in market:
(quote, returnedBase) = ticker['Label'].split('/')
if base == returnedBase and quote in self.quotes:
self.add_rate(feed, base, quote, ticker['Price'], ticker['Volume_24h'] / ticker['Price'])
return feed
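# A usage sketch (hedged: the FeedSource base class is not shown in this file, so
# the constructor keywords -- api_key, bases, quotes, timeout -- are assumed to be
# stored as attributes by FeedSource.__init__):
#
#   source = WorldCoinIndex(api_key="YOUR_KEY", bases=["USD"], quotes=["BTC", "LTC"])
#   feed = source._fetch()   # one v2getmarkets request per base, filtered to self.quotes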
| 46.08 | 113 | 0.598958 | [
"MIT"
] | Zapata/bitshares-pricefeed | bitshares_pricefeed/sources/worldcoinindex.py | 1,152 | Python |
# Copyright 2016, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import util
def FilterKnownValgrindTestFailures(tests):
rc, output = util.getstatusoutput('valgrind --version')
if rc != 0:
util.abort('Failed to get the Valgrind version.')
  version = re.search(r'^valgrind-([0-9]+)\.([0-9]+)\.([0-9]+)', output)
if not version:
util.abort('Failed to get the Valgrind version.')
major = int(version.group(1))
minor = int(version.group(2))
if major > 3 or (major == 3 and minor > 10):
    return (tests, 0, "")
reason = "Valgrind versions before 3.11 have issues with fused multiply-add, " \
"so disable the affected tests."
known_valgrind_test_failures = {
'AARCH64_SIM_fmadd_d',
'AARCH64_SIM_fmadd_s',
'AARCH64_SIM_fmla_2D',
'AARCH64_SIM_fmla_2D_2D_D',
'AARCH64_SIM_fmla_2S',
'AARCH64_SIM_fmla_2S_2S_S',
'AARCH64_SIM_fmla_4S',
'AARCH64_SIM_fmla_4S_4S_S',
'AARCH64_SIM_fmla_D_D_D',
'AARCH64_SIM_fmls_2D',
'AARCH64_SIM_fmls_2D_2D_D',
'AARCH64_SIM_fmls_2S',
'AARCH64_SIM_fmls_2S_2S_S',
'AARCH64_SIM_fmls_4S',
'AARCH64_SIM_fmls_4S_4S_S',
'AARCH64_SIM_fmls_D_D_D',
'AARCH64_SIM_fmsub_d',
'AARCH64_SIM_fmsub_s',
'AARCH64_SIM_fnmadd_d',
'AARCH64_SIM_fnmadd_s',
'AARCH64_SIM_fnmsub_d',
'AARCH64_SIM_fnmsub_s',
'AARCH64_SIM_frecps_2D',
'AARCH64_SIM_frecps_D',
'AARCH64_SIM_frsqrts_2D',
'AARCH64_SIM_frsqrts_D'
}
filtered_list = [x for x in tests if x not in known_valgrind_test_failures]
return (filtered_list, len(tests) - len(filtered_list), reason)
def FilterKnownTestFailures(tests, **env):
skipped = []
if env.get('under_valgrind'):
tests, n_tests_skipped, reason = FilterKnownValgrindTestFailures(tests)
skipped.append( (n_tests_skipped, reason) )
return (tests, skipped)
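# A sketch of the expected call from the test driver (not part of this file).
# Without 'under_valgrind' the test list passes through unchanged and nothing
# is skipped.
def _ExampleFilterUsage():
  tests = ['AARCH64_SIM_fmadd_d', 'AARCH64_ASM_add_w']
  filtered, skipped = FilterKnownTestFailures(tests, under_valgrind=False)
  assert filtered == tests and skipped == []
  return filtered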
| 37.168539 | 82 | 0.739117 | [
"BSD-3-Clause"
] | bwasti/vixl | tools/known_test_failures.py | 3,308 | Python |
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
if __name__ == '__main__':
# input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'
# output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'
# a = load_pickle(input_file)
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
# save_pickle(a, output_file)
input_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
output_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
a = load_pickle(input_file)
print(a['plans_per_stage'])
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
a['plans_per_stage'][0]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][1]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][0]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][1]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][0]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][1]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][0]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
a['plans_per_stage'][1]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
save_pickle(a, output_file) | 69.041667 | 117 | 0.639107 | [
"Apache-2.0"
] | Jiawei-Yang/TumorCP | nnunet/experiment_planning/change_batch_size.py | 1,657 | Python |
import sys
from flask import Flask, jsonify, request, url_for
from flask_login import LoginManager, login_required, current_user
from marshmallow import ValidationError
from slugify import slugify
from entity import User, db
from model import user_schema, ma, users_schema
login_manager = LoginManager()
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///../resources/user.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
ma.init_app(app)
login_manager.init_app(app)
@app.route('/v1/user/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return user_schema.jsonify(user)
@app.route('/v1/user', methods=['POST'])
def create_user():
try:
user = User.query.filter(User.user_name == request.form.get('user_name')).first()
if user and user.user_name:
raise Exception('User exist!')
user = user_schema.load(request.form)
except ValueError as errors:
resp = jsonify(errors.messages)
resp.status_code = 400
return resp
user.user_name = slugify(request.form.get('user_name'))
db.session.add(user)
db.session.commit()
location = url_for("get_user", id=user.id)
resp = jsonify({'message': 'created'})
resp.status_code = 201
resp.headers['location'] = location
return resp
@app.route('/v1/users', methods=['GET'])
def get_users():
users = User.query.all()
return users_schema.jsonify(users)
@app.route('/v1/user/<int:id>', methods=['PUT'])
def edit_user(id):
user = User.query.get_or_404(id)
try:
user = user_schema.load(request.form, instance=user)
except ValidationError as errors:
resp = jsonify(errors.messages)
resp.status_code = 400
return resp
user.user_name = slugify(user.user_name)
db.session.add(user)
db.session.commit()
location = url_for("get_user", id=user.id)
resp = jsonify({'message': 'updated'})
resp.status_code = 201
resp.headers['location'] = location
return resp
@app.route('/v1/user/<int:id>', methods=['DELETE'])
def delete_user(id):
user = User.query.get_or_404(id)
db.session.delete(user)
db.session.commit()
return jsonify({"message": "deleted"})
@app.errorhandler(404)
def page_not_found(error):
resp = jsonify({"error": "not found"})
resp.status_code = 404
return resp
@app.route('/profile')
@login_required
def user_profile():
return jsonify(current_user)
@app.route('/whoami')
def who_am_i():
if current_user.is_authenticated:
name = current_user.name
else:
name = 'Anonymous'
return jsonify({'name': name})
@login_manager.user_loader
def load_user(user_id):
return User.get(user_id)
@login_manager.request_loader
def load_user_from_request(request):
api_key = request.headers.get('Authorization')
if not api_key:
return None
return User.query.filter_by(api_key=api_key).first()
if __name__ == "__main__":
if "createdb" in sys.argv:
with app.app_context():
db.create_all()
print("Database created!")
elif "seeddb" in sys.argv:
with app.app_context():
p1 = User(address="205 nguyen duy trinh", name="hoang", user_name="hoang",
image_url="http://example.com/rover.jpg", api_key="abc123")
db.session.add(p1)
p2 = User(address="truong quang trach", name="tuan", user_name="nguyen",
image_url="http://example.com/spot.jpg", api_key="abc345")
db.session.add(p2)
db.session.commit()
print("Database seeded!")
else:
app.run(debug=True)
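# Example requests against the development server above (a sketch: the exact form
# fields required depend on user_schema, and "abc123" is the api_key inserted by seeddb):
#
#   python user_controller.py createdb
#   python user_controller.py seeddb
#   python user_controller.py
#
#   curl http://127.0.0.1:5000/v1/users
#   curl http://127.0.0.1:5000/v1/user/1
#   curl -X POST -d "user_name=alice smith&name=Alice&address=somewhere" \
#        http://127.0.0.1:5000/v1/user
#   curl -H "Authorization: abc123" http://127.0.0.1:5000/whoami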
| 26.12766 | 89 | 0.658252 | [
"Apache-2.0"
] | tuannguyendang/montypython | controller/user_controller.py | 3,684 | Python |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import RK45
f_out = "E:\\1\\P_rk4.txt" # address file for output
f2 = open(f_out,"w+")
def du_dx(x,y):
wa=1 # atomic frequency
wp=0.6 # field frequency
g=0.6 # coupling strength
n = 1 # number of photons
A = n*wp+(wa/2)
B = (1+n)*wp-(wa/2)
X = n+1
C = np.sqrt(X)
dydx_1= A*y[1]+g*C*y[3]
dydx_2= -A*y[0]-g*C*y[2]
dydx_3= B*y[3]+g*C*y[1]
dydx_4= -B*y[2]-g*C*y[0]
return [dydx_1,dydx_2,dydx_3,dydx_4]
y_0 = (1/np.sqrt(2),0,1/np.sqrt(2),0) # initial value
# print("y_0 = ",y_0)
m = 1000
ti = 0
tf = 30
h = tf/m
tspan = np.arange(ti,tf,h)
print(h)
# Integrate the system from ti to tf with RK45, stepping until t_bound is reached.
v = RK45(du_dx, t0=ti, y0=y_0, t_bound=tf, max_step=h)
print(v.t, v.y)
while v.status == "running":
    v.step()
    print(v.t, v.y)
# print(type(v))
# print("v.t[0] = ",v.t[0])
# print(len(v.t))
# print("------------------")
# print(v.y)
# print(len(v.t))
# print("------------------")
# y_1 = v.y[:,0]
# print("y_1 = ",y_1)
# print("------------------")
# y_2 = v.y[0,:]
# print("y_2 = ",y_2)
# print("------------------")
# y_3 = v.y[0,0]
# print("y_3 = ",y_3)
# print("------------------")
# # --------------------------
# # print in file
# count = 0
# while count<1000:
# y_i = v.y[:,count]
# f2.write(str(v.t[count]))
# f2.write(" ")
# for i in y_i:
# i = round(i,4)
# i = str(i)
# f2.write(i)
# f2.write(len(i)*" ")
# f2.write("\n")
# count = count+1
# # y_prime = u_s[:,1]
# # print(y_prime)
# plt.plot(v.t, v.y[0,:],'-', label='r(t)')
# plt.xlabel("x")
# plt.ylabel("y")
# plt.show() | 23.457143 | 76 | 0.476248 | [
"MIT"
] | Mahdi-Asadi/Python_Thesis | RK45 - Copy.py | 1,642 | Python |
"""
Utility functions for the btcpayserver client
"""
import pickle
from app.db import get_db
from config import Config
def get_client():
"""
Loads the serialized client from database
"""
db = get_db()
pickled_client = db.execute(
"SELECT pickled_client FROM btc_pay_server_client ORDER BY id"
).fetchone()
return pickle.loads(pickled_client['pickled_client'])
def create_invoice(price=Config.TIP_AMOUNT, currency=Config.TIP_CURRENCY, order_id=None, desc=None, notification_url=None, redirect_url=None):
"""
Creates a new invoice and returns invoice id
:param price: a given price (default is bitcoin)
:param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc
:return: invoice_id -> str
"""
client = get_client()
try:
new_invoice = client.create_invoice(
{
'price': price,
'currency': currency,
'orderId': order_id,
'itemDesc': desc,
'notificationUrl': notification_url,
'redirectUrl': redirect_url
}
)
return new_invoice['id']
except Exception as e:
print(e)
return 'XXX'
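# A usage sketch (assumes a BTCPay client was pickled into the btc_pay_server_client
# table and that this runs inside an application context; the order id and URLs are
# placeholders):
#
#   invoice_id = create_invoice(price=5, currency="USD", order_id="tip-42",
#                               desc="Tip jar", redirect_url="https://example.com/thanks")
#   invoice = get_invoice(invoice_id)        # full invoice dict from BTCPay
#   latest = get_most_recent_invoice()       # list holding the newest invoice
#
# Note that create_invoice returns the placeholder id 'XXX' if invoice creation fails.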
def get_invoice(invoice_id: str):
"""
Get an invoice by ID
"""
client = get_client()
return client.get_invoice(invoice_id)
def get_most_recent_invoice():
"""
    Returns the most recent invoice created
"""
client = get_client()
return client.get_invoices()[:1]
| 25.864407 | 142 | 0.621887 | [
"MIT"
] | psqnt/flask-btcpay-example | app/btcpayserver_helper.py | 1,526 | Python |
DEFAULT_INIT = "variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=0.78)"
# Patching in some alternate conformer architectures
def add_SE_block(network, in_layer, name_prefix, se_act="swish"):
    # This adds an SE block anywhere
# Returns the output layer name
network[name_prefix + "_SE_reduce"] = {
"class" : "reduce",
"mode" : "mean",
"from" : in_layer,
"axes" : "T"
}
network[name_prefix + "_SE_linear1"] = {
"class" : "linear",
"from" : name_prefix + "_SE_reduce",
"n_out" : 32
}
network[name_prefix + "_SE_act1"] = {
"class" : "activation",
"activation" : se_act,
"from" : name_prefix + "_SE_linear1"
}
network[name_prefix + "_SE_linear2"] = {
"class" : "linear",
"from" : name_prefix + "_SE_act1",
"n_out" : 256
}
network[name_prefix + "_SE_elm_mul"] = {
"class" : "eval",
"eval" : "source(0) * source(1)",
"from" : [name_prefix + "_SE_linear2", in_layer]
}
return name_prefix + "_SE_elm_mul"
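# A minimal sketch of splicing the SE block behind an existing layer of a RETURNN
# network dict. The layer names are placeholders; the input layer is assumed to
# carry 256 features, since the excitation size above is hard-coded to 256.
def _example_add_se_block():
    network = {"enc_01_conv_output": {"class": "copy", "from": ["data"]}}
    out = add_SE_block(network, in_layer="enc_01_conv_output",
                       name_prefix="enc_01_conv", se_act="swish")
    # Downstream layers read from the returned name, "enc_01_conv_SE_elm_mul".
    network["enc_01_next"] = {"class": "copy", "from": out}
    return network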
def conformer_enc_layer_all_in_one_SE(
network, name, num_heads, model_dim, key_dim, value_dim, ff_dim,
kernel_size,
sa_dropout, sa_post_dropout, ff_activation_dropout, ff_post_dropout,
from_layers, conv_post_dropout,
initialization=DEFAULT_INIT, ff_activation="swish",
end_layernorm=False,
normal_conv=False, output_channels=16,
kernel_size_for_feature=3,
attention_left_only=False, separated=False,
windowing=False, window_size=None, gauss_window=False,
relative_pe=False, fixed=False, clipping=100, untied_pe=False, relative_pe_transformer_xl=False,
linear_mapping = True, linear_mapping_bias = False, switch = False,
energy_factor = -0.5,
half_ratio = 0.5,
half_ratio_levels = None,
with_se = True,
se_pos = None,
se_act = "swish"
):
if windowing or untied_pe or relative_pe_transformer_xl or energy_factor != -0.5:
assert separated
if with_se:
        assert se_pos is not None, "this version needs se_pos != None"
if half_ratio_levels is not None:
idx = int(name.split("_")[-1]) - 1 # Hack but does the trick
half_ratio = half_ratio_levels[idx]
if from_layers is None:
from_layers = ["data"]
elif isinstance(from_layers, str):
from_layers = [from_layers]
## first ffn with residual connection
network[f"{name}_ff1_laynorm"] = {'class': "layer_norm",
'from': from_layers}
network[f"{name}_ff1_conv1"] = {
'class': "linear", 'activation': ff_activation, 'with_bias': True,
'from': [f"{name}_ff1_laynorm"],
'n_out': ff_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff1_conv2"] = {
'class': "linear", 'activation': None, 'with_bias': True,
'from': [f"{name}_ff1_conv1"], 'dropout': ff_activation_dropout,
'n_out': model_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff1_drop"] = {'class': "dropout",
'dropout': ff_post_dropout,
'from': [f"{name}_ff1_conv2"]}
network[f"{name}_ff1_drop_half"] = {
'class': "eval",
'eval': f"{half_ratio} * source(0)",
'from': [f"{name}_ff1_drop"]
}
network[f"{name}_ff1_out"] = {
'class': "combine", 'kind': "add",
'from': from_layers + [f"{name}_ff1_drop_half"]
}
## MHSA module
network[f"{name}_self_att_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_ff1_out"]}
if separated:
key_per_head = int(key_dim / num_heads)
value_per_head = int(value_dim / num_heads)
network[f"{name}_att_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': key_dim,
'forward_weights_init': initialization
}
# query per head
network[f"{name}_att_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_query0"],
}
network[f"{name}_att_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_value0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': value_dim,
'forward_weights_init': initialization}
## split the key and value vectors for each head
network[f"{name}_att_key"] = {
'class': "split_dims", 'axis': "F", 'dims': (num_heads, key_per_head),
'from': [f"{name}_att_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_value"] = {
'class': "split_dims", 'axis': "F", 'dims': (num_heads, value_per_head),
'from': [f"{name}_att_value0"], # (B, enc-T, H, D'/H)
}
## encoder-decoder energy
## we have exactly enc-T energy values
network[f"{name}_att_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_key", f"{name}_att_query"]} # (B, H, key-T, query-T)
## normalize the attention weights (depends on key/query dim.)
network[f"{name}_att_weights"] = {
'class': "softmax_over_spatial", 'from': [f"{name}_att_energy"],
'energy_factor': key_per_head ** energy_factor, # (B, H, key-T, query-T), key-T is where softmax is performed
}
# relative_pe as in transformer xl
if relative_pe_transformer_xl and not relative_pe and not untied_pe:
shared_layers = False
network[f"{name}_att_emb_emb"] = network[f"{name}_att_energy"]
# (B, enc-T, d_pos)
assert 'source' in network
if 'pos' not in network:
network["pos"] = {
'class': "positional_encoding",
'add_to_input': False,
'from': ["source"],
'n_out': model_dim
}
# network['pos_with_0'] = {
# "class": "eval", "from": ["pos"],
# "eval": f"tf.slice(tf.concat([tf.expand_dims(tf.tile(tf.reshape([0, 1] * ({model_dim}//2), " \
# f"(1, {model_dim})), [tf.shape(source(0))[0], 1]), 1), source(0)], 1), [0, 0, 0], [-1, tf.shape(source(0))[1], -1])"}
if shared_layers:
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ['pos'], 'n_out': key_dim, # (B, enc-T, D) # pos_with_0
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
else:
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ['pos'], 'n_out': key_dim, # (B, enc-T, D) # pos_with_0
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
# (B, enc-T, H, D/H), (B, dec-T, H, D/H) -> (B, H, enc-T, dec-T)
network[f"{name}_att_emb_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_query"]
}
if shared_layers:
network[f"{name}_att_emb_pos"]['from'] = ["att_pos_key", f"{name}_att_query"]
# (B, H, enc-T, dec-T)
network[f"{name}_att_emb_pos_shifted"] = {
'class': "eval",
'eval': "self.network.get_config().typed_value('rel_shift')(source(0))",
'from': [f"{name}_att_emb_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1}
}
# (B, 4, F)
if shared_layers:
network["pos_emb_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head),
'add_time_axis': True,
'init': DEFAULT_INIT
}
else:
network[f"{name}_pos_emb_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head),
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc-T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T=1)
network[f"{name}_att_pos_emb"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_key", f"{name}_pos_emb_bias"],
'out_type': {'shape': (num_heads, None, 1)}
#'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
if shared_layers:
network[f"{name}_att_pos_emb"]['from'] = [f"{name}_att_key", "pos_emb_bias"]
network[f"{name}_att_pos_emb_tiled"] = {
'class': "rel_shift",
'rel_shift': False,
'from': [f"{name}_att_pos_emb"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
if shared_layers:
network["pos_pos_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head), # (B, d, 4)
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1)
network["att_pos_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "pos_pos_bias"],
'out_type': {'shape': (num_heads, None, 1)}
# 'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
# (B, H, T, T')
network["att_pos_pos_shifted"] = {
'class': "rel_shift",
'from': ["att_pos_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
else:
network[f"{name}_pos_pos_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head), #(B, d, 4)
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1)
network[f"{name}_att_pos_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_pos_pos_bias"],
'out_type': {'shape': (num_heads, None, 1)}
#'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
# (B, H, T, T')
network[f"{name}_att_pos_pos_shifted"] = {
'class': "rel_shift",
'from': [f"{name}_att_pos_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
network[f"{name}_att_energy"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_emb_emb", f"{name}_att_pos_emb_tiled",
f"{name}_att_emb_pos_shifted", f"{name}_att_pos_pos_shifted"]
}
if shared_layers:
network[f"{name}_att_energy"]['from'] = [f"{name}_att_emb_emb", f"{name}_att_pos_emb_tiled",
f"{name}_att_emb_pos_shifted", "att_pos_pos_shifted"]
if untied_pe and not relative_pe:
assert 'source' in network
if 'pos' not in network:
network["pos"] = {
'class': "positional_encoding",
'add_to_input': False,
'from': ["source"],
'n_out': model_dim
}
# shared
if False:
if 'att_pos_query0' not in network:
network["att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network["att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': ["att_pos_query0"],
}
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
network["att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", "att_pos_energy"]
}
# per layer
if False:
network[f"{name}_att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network[f"{name}_att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_pos_query0"],
}
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", f"{name}_att_pos_energy"]
}
# with corrected normalization factor
if True:
network[f"{name}_att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network[f"{name}_att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_pos_query0"],
}
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", f"{name}_att_pos_energy"]
}
network[f"{name}_att_weights"]['energy_factor'] = (2 * key_per_head) ** energy_factor
# scale per layer
if False:
if 'att_pos_query0' not in network:
network["att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network["att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': ["att_pos_query0"],
}
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
network["att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "att_pos_query"]}
network[f"{name}_att_pos_energy_scale"] = {
'class': 'variable',
'shape': (num_heads,),
'init': 1.0,
'add_batch_axis': False
}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "eval",
'eval': f"tf.add(source(0), tf.multiply(source(1), tf.reshape(source(2), (1, {num_heads}, 1, 1))))",
'from': [f"{name}_att_energy", "att_pos_energy", f"{name}_att_pos_energy_scale"]
}
network[f"{name}_att_weights"]["from"] = [f"{name}_att_energy_with_pos_corr"]
## attention weights dropout
network[f"{name}_att_weights_drop"] = {
'class': "dropout", 'dropout_noise_shape': {'*': None},
'dropout': sa_dropout, 'from': [f"{name}_att_weights"],
}
## now we have an attention weight value for each encoder-side output
    ## we get one vector per head
network[f"{name}_att0"] = {
'class': "generic_attention", 'weights': f"{name}_att_weights_drop",
'base': f"{name}_att_value", # (B, T, H, V) #(B, H, V)
}
network[f"{name}_self_att_att"] = {
'class': "merge_dims", 'axes': "static", # "static"
'from': [f"{name}_att0"]
}
    ## not sure if this works
if windowing:
#hard masking
if not gauss_window:
eval_win_size = f'tf.expand_dims(tf.tile(tf.expand_dims(tf.expand_dims(tf.constant({window_size}, dtype=tf.int32), axis = -1), axis = -1), '\
f'[1, tf.shape(source(0))[-2], tf.shape(source(0))[-1]]), 0)'
eval_win_start = f'tf.expand_dims(tf.map_fn(fn = lambda t: tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-1]), 0), '\
f'[tf.shape(source(0))[2], 1]) - t, elems=tf.constant({window_size}, dtype=tf.int32)//2), 0)'
# eval_encoderT_pos = 'tf.tile(tf.expand_dims(tf.expand_dims(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), '\
# '[1, tf.shape(source(0))[-1]]), 0), 0), [1, tf.shape(source(0))[1], 1, 1])'
eval_encoderT_pos = 'tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), '\
'[tf.shape(source(0))[1], tf.shape(source(0))[-1]]), tf.shape(source(0))[1:]), 0)'
# without batch dim.
#eval_masking = 'tf.logical_and(tf.less_equal(source(0), source(1)), tf.greater_equal(source(0), source(2)))'
eval_masking = 'tf.tile(tf.logical_and(tf.less_equal(source(0), source(1)), tf.greater_equal(source(0), source(2))), '\
'[tf.shape(source(3))[0], 1, 1, 1])'
network[f"{name}_att_energy"]['out_type'] = {'time_dim_axis': 3}
network[f"{name}_win_size"] = {
'class': 'eval',
'eval': eval_win_size,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
network[f"{name}_win_start"] = {
'class': 'eval',
'eval': eval_win_start,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
## normalize the attention weights (depends on key/query dim.)
# network[f"{name}_att_weights"]['window_start'] = f"{name}_win_start"
# network[f"{name}_att_weights"]['window_size'] = f"{name}_win_size"
network[f"{name}_win_end"] = {
'class': 'combine',
'from': [f"{name}_win_start", f"{name}_win_size"],
'kind': 'add'
}
network[f"{name}_encoderT_pos"] = {
'class': 'eval',
'eval': eval_encoderT_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
network[f"{name}_masking"] = {
'class': 'eval',
'eval': eval_masking,
'from': [f"{name}_encoderT_pos", f"{name}_win_end", f"{name}_win_start", f"{name}_att_energy"],
'out_type': {'dtype': 'bool'}
}
network[f"{name}_att_energy_masked"] = {
'class': 'eval',
'eval': f"tf.where(source(0), source(1), "\
f"tf.tile(tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.constant(float('-inf')), 0), 0), 0), 0), tf.shape(source(1))))",
'from': [f"{name}_masking", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
#soft masking: Gaussian window
else:
eval_key_pos = 'tf.cast(tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), ' \
'[tf.shape(source(0))[1], tf.shape(source(0))[-1]]), tf.shape(source(0))[1:]), 0), "float32")'
eval_query_pos = f'tf.cast(tf.expand_dims(tf.tile(tf.expand_dims(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-1]), 0), '\
f'[tf.shape(source(0))[-2], 1]), 0), [{num_heads}, 1, 1]), 0), "float32")'
network[f"{name}_key_pos"] = {
'class': 'eval',
'eval': eval_key_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_query_pos"] = {
'class': 'eval',
'eval': eval_query_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_std_for_gaussian_window"] = {
'class': 'variable',
'init': window_size[0],
'shape': (num_heads,)
}
network[f"{name}_masking"] = {
'class': 'eval',
'eval': f'{half_ratio} * tf.square(source(0) - source(1)) / tf.reshape(tf.square(source(2)), [tf.shape(source(3))[0], {num_heads}, 1, 1])',
'from': [f"{name}_query_pos", f"{name}_key_pos", f"{name}_std_for_gaussian_window", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_att_energy_masked"] = {
'class': 'combine',
'kind': 'add',
'from': [f"{name}_masking", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_att_weights"]['from'] = [f"{name}_att_energy_masked"]
network[f"{name}_att_weights"]['use_time_mask'] = False
else:
network[f"{name}_self_att_att"] = {
'class': "self_attention", 'num_heads': num_heads,
'total_key_dim': key_dim, 'n_out': value_dim,
'from': [f"{name}_self_att_laynorm"],
'attention_left_only': attention_left_only,
'attention_dropout': sa_dropout,
'forward_weights_init': initialization,
}
if relative_pe:
network[f"{name}_rel_pos"] = {
"class": "relative_positional_encoding",
"from": [f"{name}_self_att_laynorm"],
"fixed": fixed,
"clipping": clipping,
"n_out": key_dim // num_heads,
"forward_weights_init": initialization
}
network[f"{name}_self_att_att"]["key_shift"] = f"{name}_rel_pos"
if linear_mapping:
network[f"{name}_self_att_lin"] = {
'class': "linear", 'activation': None, 'with_bias': linear_mapping_bias,
'from': [f"{name}_self_att_att"], 'n_out': model_dim,
'forward_weights_init': initialization
}
network[f"{name}_self_att_drop"] = {
'class': "dropout", 'dropout': sa_post_dropout,
'from': [f"{name}_self_att_lin"]
}
else:
network[f"{name}_self_att_drop"] = {
'class': "dropout", 'dropout': sa_post_dropout,
'from': [f"{name}_self_att_att"]
}
network[f"{name}_self_att_out"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_ff1_out", f"{name}_self_att_drop"],
'n_out': model_dim
}
## convolution module
network[f"{name}_conv_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_self_att_out"]}
## d --> 2d for GLU activation
    ## could a linear layer be used as an alternative to the pointwise conv.?
network[f"{name}_conv_pointwise1"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_conv_laynorm"], 'n_out': 2 * model_dim,
'forward_weights_init': initialization
}
## (batch, time, feature)
network[f"{name}_conv_GLU"] = {
'class': "gating",
'activation': "identity",
'from': [f"{name}_conv_pointwise1"]
}
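    # Note (my reading of RETURNN's gating layer, not stated in this file): the pointwise
    # conv above doubles the width to 2*model_dim; the gating layer splits it into halves
    # a and b and outputs a * sigmoid(b), i.e. a GLU, restoring the width to model_dim.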
out_layer_name = f"{name}_conv_GLU"
if se_pos == "after_first_conv":
# TODO: implement
inpl = f"{name}_conv_GLU"
out_layer_name = add_SE_block(network, inpl, name, se_act)
if normal_conv:
network[f"{name}_conv_expanded"] = {
"class": "split_dims", "axis": "F", "dims": (-1, 1),
"from": [out_layer_name]
}
## (T, F, 1)
network[f"{name}_conv_normal"] = {
"class": "conv",
"from": [f"{name}_conv_expanded"], "padding": "same",
"filter_size": (kernel_size, kernel_size_for_feature),
"n_out": output_channels, "activation": None, "with_bias": True #model_dim//kernel_size
}
network[f"{name}_conv_normal_flattened"] = {
"class": "merge_dims",
"from": [f"{name}_conv_normal"],
"axes": "static"
}
        ## parameter intensive
network[f"{name}_conv_transformed"] = {
'class': "linear",
'activation': None,
'with_bias': False,
'forward_weights_init': initialization,
'n_out': model_dim,
"from": [f"{name}_conv_normal_flattened"]
}
network[f"{name}_conv_batchnorm"] = {
'class': "batch_norm",
'from': [f"{name}_conv_transformed"]
}
else:
network[f"{name}_conv_depthwise"] = {
'activation': None,
'class': 'conv',
'filter_size': (kernel_size,),
'from': [out_layer_name],
'groups': model_dim,
'n_out': model_dim,
'padding': 'same',
'with_bias': True
}
out_layer_name = f"{name}_conv_depthwise"
if se_pos == "after_depthwise_conv":
# TODO: implement
inpl = f"{name}_conv_depthwise"
out_layer_name = add_SE_block(network, inpl, name, se_act)
network[f"{name}_conv_batchnorm"] = {
'class': "batch_norm",
'from': [out_layer_name]
}
network[f"{name}_conv_act"] = {
'class': "activation",
'activation': "swish",
'from': [f"{name}_conv_batchnorm"]
}
network[f"{name}_conv_pointwise2"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_conv_act"], 'n_out': model_dim,
'forward_weights_init': initialization
}
out_layer_name = f"{name}_conv_pointwise2"
if se_pos == "after_sec_conv":
# TODO: implement
inpl = f"{name}_conv_pointwise2"
out_layer_name = add_SE_block(network, inpl, name, se_act)
network[f"{name}_conv_dropout"] = {
'class': "dropout", 'dropout': conv_post_dropout,
'from': [out_layer_name],
}
network[f"{name}_conv_output"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_self_att_out", f"{name}_conv_dropout"], 'n_out': model_dim,
}
## second ffn layer
network[f"{name}_ff2_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_conv_output"]}
network[f"{name}_ff2_conv1"] = {
'class': "linear", 'activation': ff_activation, 'with_bias': True,
'from': [f"{name}_ff2_laynorm"],
'n_out': ff_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff2_conv2"] = {
'class': "linear", 'activation': None, 'with_bias': True,
'from': [f"{name}_ff2_conv1"], 'dropout': ff_activation_dropout,
'n_out': model_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff2_drop"] = {'class': "dropout",
'dropout': ff_post_dropout,
'from': [f"{name}_ff2_conv2"]}
network[f"{name}_ff2_drop_half"] = {
'class': "eval",
'eval': f"{half_ratio} * source(0)",
'from': [f"{name}_ff2_drop"]
}
network[f"{name}_ff2_out"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_conv_output", f"{name}_ff2_drop_half"]
}
if switch:
network[f"{name}_conv_output"]['from'] = [f"{name}_ff1_out", f"{name}_conv_dropout"]
network[f"{name}_conv_laynorm"]['from'] = [f"{name}_ff1_out"]
network[f"{name}_self_att_laynorm"]['from'] = [f"{name}_conv_output"]
network[f"{name}_self_att_out"]['from'] = [f"{name}_conv_output", f"{name}_self_att_drop"]
network[f"{name}_ff2_laynorm"]['from'] = [f"{name}_self_att_out"]
network[f"{name}_ff2_out"]['from'] = [f"{name}_self_att_out", f"{name}_ff2_drop_half"]
## final layer norm
if end_layernorm:
network[f"{name}"] = {
'class': "layer_norm",
'from': [f"{name}_ff2_out"]
}
else:
network[f"{name}"] = {
'class': "copy",
'from': [f"{name}_ff2_out"]
}
| 36.580846 | 149 | 0.553296 | [
"MPL-2.0"
] | dierkes-j/i6_experiments | users/schupp/hybrid_hmm_nn/network_builders/layers/conformer_SE_block_layer_dynamic_oneact.py | 29,411 | Python |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
def load_arguments(self, _):
pass
| 37.866667 | 76 | 0.558099 | [
"MIT"
] | tbyfield/azure-cli-extensions | src/fidalgo/azext_fidalgo/generated/_params.py | 568 | Python |
import torch
import numpy as np
def map_per_batch(fun, values, batch_indices):
result = []
for start, stop, value_slice in sliced_per_batch(values, batch_indices):
result.append(fun(start, stop, value_slice))
return torch.cat(result)
def sliced_per_batch(values, batch_indices):
slices = torch.where(batch_indices[:-1] - batch_indices[1:] != 0)[0] + 1
slices = slices.tolist()
slices = zip([0] + slices, slices + [batch_indices.shape[0]])
for start, stop in slices:
yield start, stop, values[start:stop]
def sliced_per_batch_np(values, batch_indices):
slices = np.where(batch_indices[:-1] - batch_indices[1:] != 0)[0] + 1
slices = slices.tolist()
slices = zip([0] + slices, slices + [batch_indices.shape[0]])
for start, stop in slices:
yield start, stop, values[start:stop]
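

# --- Usage sketch (not part of the original module) -------------------------------------
# A minimal, hypothetical example of how the helpers above slice a flat tensor by its
# per-element batch index. The values below are made up purely for illustration.
if __name__ == "__main__":
    values = torch.arange(6, dtype=torch.float32)      # [0., 1., 2., 3., 4., 5.]
    batch_indices = torch.tensor([0, 0, 0, 1, 1, 2])   # three batches of sizes 3, 2, 1
    for start, stop, chunk in sliced_per_batch(values, batch_indices):
        print(start, stop, chunk)                       # slices (0, 3), (3, 5), (5, 6)
    # map_per_batch concatenates the per-batch results, here the per-batch sums:
    print(map_per_batch(lambda s, e, v: v.sum().unsqueeze(0), values, batch_indices))
    # -> tensor([3., 7., 5.])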
| 32.615385 | 76 | 0.673349 | [
"MIT"
] | penguinmenac3/leanai | leanai/core/indexed_tensor_helpers.py | 848 | Python |
import os
import re
import json
import uuid
from string import Template
from iocbuilder.iocinit import IocDataStream
def debug_print(message, level):
if int(os.getenv("ODIN_BUILDER_DEBUG", 0)) >= level:
print(message)
ADODIN_ROOT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
ADODIN_DATA = os.path.join(ADODIN_ROOT, "data")
def data_file_path(file_name):
return os.path.join(ADODIN_DATA, file_name)
class OdinPaths(object):
@classmethod
def configure_paths(cls, release_path):
paths = cls.parse_release_file(release_path)
cls.HDF5_FILTERS = os.path.join(paths["HDF5_FILTERS"], "prefix/hdf5_1.10/h5plugin")
cls.ODIN_DATA = paths["ODIN_DATA"]
for detector_path in [path for module, path in paths.items()
if module.endswith("DETECTOR")]:
detector_paths = cls.parse_release_file(
os.path.join(detector_path, "configure/RELEASE")
)
if detector_paths["ODIN_DATA"] != cls.ODIN_DATA:
raise EnvironmentError("Mismatched odin-data dependency in {}".format(detector_path))
cls.EIGER_DETECTOR = paths["EIGER_DETECTOR"]
cls.EXCALIBUR_DETECTOR = paths["EXCALIBUR_DETECTOR"]
cls.TRISTAN_DETECTOR = paths["TRISTAN_DETECTOR"]
@classmethod
def parse_release_file(cls, release_path):
macros = {}
with open(release_path) as release_file:
for line in release_file.readlines():
if "=" in line:
module, path = line.split("=", 1)
macros[module.strip()] = path.strip()
macro_re = re.compile(r"\$\(([^\)]+)\)")
for macro in macros:
for find in macro_re.findall(macros[macro]):
if find in macros.keys():
macros[macro] = macros[macro].replace("$({})".format(find), macros[find])
return macros
# Read Odin paths on import
OdinPaths.configure_paths(
os.path.join(ADODIN_ROOT, "configure/RELEASE.local")
)
def expand_template_file(template, macros, output_file, executable=False):
if executable:
mode = 0755
else:
mode = None
with open(os.path.join(ADODIN_DATA, template)) as template_file:
template_config = Template(template_file.read())
output = template_config.substitute(macros)
debug_print("--- {} ----------------------------------------------".format(output_file), 2)
debug_print(output, 2)
debug_print("---", 2)
stream = IocDataStream(output_file, mode)
stream.write(output)
def create_batch_entry(beamline, number, name):
return "{beamline}-EA-ODN-{number:02d} st{name}.sh".format(
beamline=beamline, number=number, name=name
)
class OneLineEntry(object):
"""A wrapper to stop JSON entries being split across multiple lines.
Wrap this around lists, dictionaries, etc to stop json.dumps from
splitting them over multiple lines. Must pass OneLineEncoder to
json.dumps(cls=).
"""
def __init__(self, value):
self.value = value
class OneLineEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super(OneLineEncoder, self).__init__(*args, **kwargs)
self.kwargs = dict(kwargs)
del self.kwargs["indent"]
self._replacement_map = {}
def default(self, o):
if isinstance(o, OneLineEntry):
key = uuid.uuid4().hex
self._replacement_map[key] = json.dumps(o.value, **self.kwargs)
return "@@%s@@" % (key,)
else:
return super(OneLineEncoder, self).default(o)
def encode(self, o):
result = super(OneLineEncoder, self).encode(o)
for key, value in self._replacement_map.iteritems():
result = result.replace("\"@@%s@@\"" % (key,), value)
return result
def create_config_entry(dictionary):
entry = json.dumps(dictionary, indent=2, cls=OneLineEncoder)
return entry.replace("\n", "\n ")
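

# Usage sketch (illustrative only, not part of the original module): wrapping a value in
# OneLineEntry keeps it on a single line when create_config_entry() renders the JSON.
# With a hypothetical dictionary such as
#
#   create_config_entry({"plugin": "hdf", "dims": OneLineEntry([256, 256])})
#
# the "dims" list is emitted as [256, 256] on one line, while the surrounding entries keep
# the usual two-space indentation produced by json.dumps(indent=2).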
| 30.801527 | 101 | 0.628748 | [
"Apache-2.0"
] | dls-controls/ADOdin | etc/builder/util.py | 4,035 | Python |
"""
Remote platform
This platform uses physical ethernet interfaces.
"""
# Update this dictionary to suit your environment.
remote_port_map = {
(0, 0): "eth0",
(0, 1): "eth1",
(0, 2): "eth2",
(0, 3): "eth3",
(0, 4): "eth4",
(0, 5): "eth5",
(0, 6): "eth6",
(0, 7): "eth7",
(0, 8): "eth8",
(0, 9): "eth9",
(0, 10): "eth10",
(0, 11): "eth11",
(0, 12): "eth12",
(0, 13): "eth13",
(0, 14): "eth14",
(0, 15): "eth15",
(0, 16): "eth16",
(0, 17): "eth17",
(0, 18): "eth18",
(0, 19): "eth19",
(0, 20): "eth20",
(0, 21): "eth21",
(0, 22): "eth22",
(0, 23): "eth23",
(0, 24): "eth24",
(0, 25): "eth25",
(0, 26): "eth26",
(0, 27): "eth27",
(0, 28): "eth28",
(0, 29): "eth29",
(0, 30): "eth30",
(0, 31): "eth31",
}
def platform_config_update(config):
"""
Update configuration for the remote platform
@param config The configuration dictionary to use/update
"""
global remote_port_map
config["port_map"] = remote_port_map.copy()
config["caps_table_idx"] = 0
| 20.849057 | 60 | 0.489593 | [
"Apache-2.0"
] | PJHsieh/MarkHsieh_ptf | src/ptf/platforms/remote.py | 1,105 | Python |
import numpy as np
import pandas as pd
import torch
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from torch.utils.data import DataLoader
from sklearn.ensemble import ExtraTreesClassifier
from parameters import *
from training.evaluation import Evaluate, ClassificationRanker
from training.feature_extraction import FeatureExtraction
from training.train_loop import train_loop
from training.utils import Utils, Datasets
import models as md
# Define Processor
print("1.\t" + str(device.type).capitalize() + " detected\n")
# Preprocess Data
utils = Utils()
featureExtraction = FeatureExtraction()
# validation data
print("2.\tProcessing Resume data for validation ...")
resume = utils.process_resumes(pth, categories, scores, query_name, feature_name)
featureExtraction.generate_features(resume, query_name, feature_name, resume_path)
# train data
print("3.\tProcessing Train data ...")
# utils.clean_save_data(data_train_path, data_test_path, data_valid_path, required_columns, clean_data_path)
# Load Data
print("4.\tLoading Data ...")
valid = utils.load_data(resume_path)
train_test = utils.load_data(clean_data_path)
output_dim = 1  # len(train_test.y.unique())
# Train/Test Split
print("5.\tGetting Train/Test/Validation Data ...")
x_train, x_test, x_valid, y_train, y_test, y_valid, qid_train, qid_test, qid_valid = \
utils.split_data(train_test, valid, .05)
print('6.\tTrain: {}\tTest: {}\tValid: {}\tOutput: {}'.format(x_train.shape, x_test.shape, x_valid.shape, output_dim))
print(
'7.\tUnique Query Ids (train: {}\ttest: {}\tvalid: {})'.format(len(np.unique(qid_train)), len(np.unique(qid_test)),
len(np.unique(qid_valid))))
# Define Model
# model = md.RNN(x_train.shape[1], output_dim, hidden2, 2)
# model = md.Model1(x_train.shape[1], hidden1, hidden2, hidden3, output_dim)
# model = md.Model2(output_dim)
model = md.Model4(x_train.shape[1], output_dim)
model.to(device)
print("8.\tModel defined and moved to " + str(device.__str__()))
# Parameters
optimizer = Optimizer(model.parameters())
scheduler = scheduler(optimizer)
print("9.\tCriterion set as " + str(criterion.__str__()))
print("10.\tOptimizer set as " + str(optimizer.__str__()))
# Data Loader
train_dataset = Datasets(y_train, x_train, qid_train)
test_dataset = Datasets(y_test, x_test, qid_test)
valid_dataset = Datasets(y_valid, x_valid, qid_valid)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=56, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)
train_qid, train_labels, train_features = next(iter(train_loader))
print("11.\tDataLoader Shapes-> QID: {}\tLabel: {}\tFeatures: {}".format(train_qid.size(), train_labels.size(),
train_features.size()))
# NN Model
print("12.\tTrain loop")
# train_loop(model, epochs, optimizer, criterion, train_loader, test_loader, valid_loader, k_rank,
# printing_gap, saved_model_device, model_path, device, PIK_plot_data, scheduler)
# Regressor Model
# rfr = RandomForestRegressor(n_estimators=200, min_samples_split=5, random_state=1, n_jobs=-1)
# rfr.fit(x_train, y_train)
# Evaluate().print_evaluation(rfr, x_train, y_train, qid_train, k_rank)
# Evaluate().print_evaluation(rfr, x_test, y_test, qid_test, k_rank)
# Evaluate().print_evaluation(rfr, x_valid, y_valid, qid_valid, k_rank)
# Evaluate().save_model(rfr, reg_model_path)
# SVM Model
sm = svm.SVR()
sm.fit(x_train, y_train)
Evaluate().print_evaluation(sm, x_train, y_train, qid_train, k_rank)
Evaluate().print_evaluation(sm, x_test, y_test, qid_test, k_rank)
Evaluate().print_evaluation(sm, x_valid, y_valid, qid_valid, k_rank)
Evaluate().save_model(sm, svm_model_path)
# Classifier Model
# etc = ClassificationRanker(LogisticRegression(C=1000))
# etc.fit(x_train, y_train)
# Evaluate().print_evaluation(etc, x_train, y_train, qid_train, k_rank)
# Evaluate().print_evaluation(etc, x_test, y_test, qid_test, k_rank)
# Evaluate().print_evaluation(etc, x_valid, y_valid, qid_valid, k_rank)
#
# yp = rfr.predict(x_valid)
# for i, j, k in zip(qid_valid, y_valid, yp):
# print(i, j, k)
| 41.634615 | 119 | 0.742725 | [
"MIT"
] | TonyMTH/Resume-Ranking | training/train.py | 4,330 | Python |
import argparse
import configparser
import pexpect
import re
import os
from ipykernel.kernelbase import Kernel
from . import __version__
""" Macaulay2 Jupyter Kernel
"""
class M2Config:
""""""
def __init__(self, execpath, configpath=os.getenv('M2JK_CONFIG')):
""""""
parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)
config = configparser.ConfigParser(allow_no_value=True)
parser.add_argument('--timeout', type=int, default=2)
parser.add_argument('--timeout_startup', type=int, default=5)
parser.add_argument('--mode', choices=['default', 'original', 'texmacs', 'pretty'],
default='default')
# parser.add_argument('--debug', default=False,
# type=lambda x: True if x.lower() in ['1','true','on'] else False)
parser.add_argument('--theme', choices=['default', 'emacs'], default='default')
        # execpath is now mutable, but modifying it is a no-op; fix this
parser.add_argument('--execpath', default=execpath)
parser.add_argument('--version', action='store_const', const=__version__, default=__version__)
parser.add_argument('--configpath', action='store_const', const=configpath, default=configpath)
parser.add_argument('--config')
args = parser.parse_args('')
if configpath:
config.read(configpath)
line = ' '.join(['--{} {}'.format(key, val) for key, val in config.items('magic')])
args = parser.parse_args(line.split(), args)
self.parser = parser
self.config = config
self.args = args
def read(self, line):
""""""
self.config.remove_section('temp')
try:
self.config.read_string('[temp]\n'+line)
key, val = self.config.items('temp')[0]
if key in self.args:
self.args = self.parser.parse_args('--{} {}'.format(key, val).split(), self.args)
val = self.args.__dict__[key]
msg = '[magic succeeded] {} = {}'.format(key, val)
except:
key, val = None, None
msg = '[magic failed]'
return key, val, msg
class M2Interp:
""" an interpreter for Macaulay2
"""
patt_input = re.compile(br'^i(\d+)\s:')
debug = False
def __init__(self, execpath=pexpect.which('M2'), timeout=4, configpath=None):
""""""
self.conf = M2Config(execpath, configpath)
self.proc = None
self.proc_command = self.conf.args.execpath
self.proc_kwargs = {
'args': ['--silent', '--no-debug', '-e', 'load("init.m2")'],
'cwd': os.path.dirname(__file__) + '/assets/m2-code/',
'timeout': timeout
}
def start(self):
""""""
if not (self.proc is None):
return
self.proc = pexpect.spawn(self.proc_command, **self.proc_kwargs)
self.proc.delaybeforesend = None
def preprocess(self, code, usemagic, printwidth=80):
""""""
magic_lines = []
code_lines = []
for line in code.splitlines():
trimmed = line.lstrip()
if not trimmed:
continue
elif usemagic and trimmed.startswith('--%'):
key, val, msg = self.conf.read(trimmed[3:])
cmd = ''
if key == 'timeout':
self.proc.timeout = val
elif key == 'mode':
if val == 'original':
self.debug = True
else:
self.debug = False
if val == 'texmacs':
cmd = 'mode(true);'
else:
cmd = 'mode(false);'
magic_lines.append(cmd + ' << "{}";--CMD'.format(msg))
elif trimmed.startswith('--'):
continue
else:
code_lines.append(line+'--CMD')
if magic_lines or code_lines:
return 'noop(begin)--CMD\n{}\nnoop(end)--CMD--EOB'.format('\n'.join(magic_lines+code_lines))
return ''
def execute(self, code, lastonly=True, usemagic=True):
""""""
clean_code = self.preprocess(code, usemagic=usemagic)
if self.debug: print(clean_code)
if not clean_code: return []
try:
return self.repl(clean_code, lastonly=lastonly)
except Exception as e:
# kill M2 execution
# self.proc.sendcontrol('c')
# clear buffer - this is not great but works - fix it
# for line in self.proc:
# if line.endswith(b'--EOB'): break
# rethrow
raise e
def repl(self, clean_code, lastonly):
""" REPL
If `self.debug==True` then result is the raw list of lines of bytes,
otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines),
where again the last 3 entries are lists of lines of bytes.
"""
self.proc.sendline(clean_code)
EOT = False
debug_lines = []
nodes = []
node = ()
linenumber = None
state = None
# make sure you are not reading an echo!
# this is important! echo occurs often especially when using M2Interp.execute() directly
# https://pexpect.readthedocs.io/en/stable/commonissues.html#timing-issue-with-send-and-sendline
for echoline in self.proc:
if echoline[:1] == b'i' and echoline.endswith(b'noop(begin)--CMD\r\n'):
break
while not EOT:
try:
for testline in self.proc:
line = testline[:-2]
if self.debug: print(line)
break
except pexpect.TIMEOUT:
self.proc.sendcontrol('c')
self.proc.read(1) # this is VERY IMPORTANT!
if node:
node[1].append('\r\no{} = [KERNEL ENFORCED TIMEOUT]'.format(linenumber).encode())
nodes.append(node)
return debug_lines if self.debug else nodes
if line.endswith(b'--EOB'):
EOT = True
if self.debug:
debug_lines.append(line)
continue
if line.endswith(b'--CMD'):
newinput = self.patt_input.match(line)
if newinput:
if node:
if lastonly:
nodes.append((node[0],node[1],[],[]))
else:
nodes.append(node)
linenumber = int(newinput.groups()[0])
node = (linenumber,[],[],[])
state = 'CMD'
elif line.endswith(b'--VAL'):
state = 'VAL'
elif line.endswith(b'--CLS'):
state = 'CLS'
else: # inside one of the states
if state=='CMD': # stdout
node[1].append(line)
elif state=='VAL':
node[2].append(line)
elif state=='CLS':
node[3].append(line)
# trim the empty trailing line coming from next input line
if not node:
pass
elif node[2]:
nodes.append((node[0],node[1],node[2],node[3][:-1]))
else:
nodes.append((node[0],node[1][:-1],[],[]))
return debug_lines if self.debug else nodes
class M2Kernel(Kernel):
""" the M2 kernel for Jupyter
"""
implementation = 'macaulay2_jupyter_kernel'
implementation_version = __version__
language = 'Macaulay2'
language_version = '1.13.0.1' # "defining implementation" version
language_info = {
'name': 'Macaulay2',
'mimetype': 'text/x-macaulay2',
'file_extension': '.m2',
'codemirror_mode': 'macaulay2',
# 'pigments_lexer': None,
}
banner = 'Jupyter Kernel for Macaulay2 (v{})'.format(implementation_version)
help_links = [{
'text': 'M2JK Demo',
'url': 'https://nbviewer.jupyter.org/github/radoslavraynov/Macaulay2-Jupyter-Kernel/blob/master/demo/demo.ipynb'
}]
def __init__(self, *args, **kwargs):
""" kernel init - calls __init__ on the parent and sets up the M2Interp object
"""
super().__init__(*args, **kwargs)
self.interp = M2Interp(configpath=os.environ.get('M2JK_CONFIG'))
self.interp.start()
def process_output(self, nodes):
"""
"""
mode = self.interp.conf.args.mode
if mode == 'original':
clean_lines = []
for ln in nodes:
if ln.endswith(b'--EOB') or ln.endswith(b'--VAL') or ln.endswith(b'--CLS'):
pass
elif ln.endswith(b'--CMD'):
clean_lines.append(ln[:-5])
else:
clean_lines.append(ln)
return None, b'\n'.join(clean_lines).decode()
elif self.interp.debug:
return nodes
elif mode == 'default':
lines = [ln.decode() for node in nodes for part in node[1:] for ln in part]
return None, '\n'.join(lines)
stdout = '\n'.join([ln.decode() for node in nodes for ln in node[1]])
if mode == 'texmacs':
value_lines = nodes[-1][2]
if value_lines:
dirty = '\n'.join([ln.decode() for ln in value_lines])
clean = dirty[6:] + '\n</math>'
return {'text/html': clean}, stdout
elif mode == 'pretty':
margin = len(str(nodes[-1][0]))+4
textval = '\n'.join([ln[margin:].decode() for ln in nodes[-1][2]])
textcls = '\n'.join([ln[margin:].decode() for ln in nodes[-1][3]])
html = '<pre>{}</pre><pre style="color: gray">{}</pre>'.format(textval, textcls)
return {'text/html': html}, stdout
return None, stdout
def send_stream(self, text, stderr=False):
""" enqueues a stdout or stderr message for the given cell
"""
stdfile = 'stderr' if stderr else 'stdout'
content = {'name': stdfile, 'text': text+'\n'}
self.send_response(self.iopub_socket, 'stream', content)
def mock_execute(self, code):
""""""
output_lines = self.interp.execute(code, lastonly=False)
return self.process_output(output_lines)
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
""" kernel entry point for the execution of each cell
"""
try:
output_lines = self.interp.execute(code)
except Exception as e:
output_lines = []
self.send_stream(str(e), True)
xcount = None
if not silent:
if not output_lines:
return {'status': 'ok',
'execution_count': None,
'payload': [],
'user_expressions': {}}
data, stream = self.process_output(output_lines)
xcount = output_lines[-1][0]
if stream:
stdout_content = {'name': 'stdout', 'text': stream}
self.send_response(self.iopub_socket, 'stream', stdout_content)
if data:
execute_content = {'data': data, 'execution_count': xcount}
self.send_response(self.iopub_socket, 'execute_result', execute_content)
return {'status': 'ok',
'execution_count': xcount,
'payload': [],
'user_expressions': {}}
| 36.977918 | 120 | 0.524484 | [
"MIT"
] | MWhybrow92/Macaulay2-Jupyter-Kernel | m2_kernel/kernel.py | 11,722 | Python |
#!/usr/bin/env python3
import argparse
import common
import functools
import multiprocessing
import os
import os.path
import pathlib
import re
import subprocess
import stat
import sys
import traceback
import shutil
import paths
EXCLUDED_PREFIXES = ("./generated/", "./thirdparty/", "./build", "./.git/", "./bazel-", "./.cache",
"./source/extensions/extensions_build_config.bzl",
"./bazel/toolchains/configs/", "./tools/testdata/check_format/",
"./tools/pyformat/")
SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto",
".rst")
DOCS_SUFFIX = (".md", ".rst")
PROTO_SUFFIX = (".proto")
# Files in these paths can make reference to protobuf stuff directly
GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test")
REPOSITORIES_BZL = "bazel/repositories.bzl"
# Files matching these exact names can reference real-world time. These include the class
# definitions for real-world time, the construction of them in main(), and perf annotation.
# For now it includes the validation server but that really should be injected too.
REAL_TIME_WHITELIST = ("./source/common/common/utility.h",
"./source/extensions/filters/http/common/aws/utility.cc",
"./source/common/event/real_time_system.cc",
"./source/common/event/real_time_system.h", "./source/exe/main_common.cc",
"./source/exe/main_common.h", "./source/server/config_validation/server.cc",
"./source/common/common/perf_annotation.h",
"./test/test_common/simulated_time_system.cc",
"./test/test_common/simulated_time_system.h",
"./test/test_common/test_time.cc", "./test/test_common/test_time.h",
"./test/test_common/utility.cc", "./test/test_common/utility.h",
"./test/integration/integration.h")
# Files in these paths can use MessageLite::SerializeAsString
SERIALIZE_AS_STRING_WHITELIST = (
"./source/common/config/version_converter.cc",
"./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc",
"./test/common/protobuf/utility_test.cc",
"./test/common/grpc/codec_test.cc",
"./test/common/grpc/codec_fuzz_test.cc",
)
# Files in these paths can use Protobuf::util::JsonStringToMessage
JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc")
# Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing
# ones were grandfathered as part of PR #8484 for backwards compatibility.
HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms",
"initialization_time_ms", "loop_duration_us", "poll_delay_us",
"request_time_ms", "upstream_cx_connect_ms",
"upstream_cx_length_ms")
# Files in these paths can use std::regex
STD_REGEX_WHITELIST = ("./source/common/common/utility.cc", "./source/common/common/regex.h",
"./source/common/common/regex.cc",
"./source/common/stats/tag_extractor_impl.h",
"./source/common/stats/tag_extractor_impl.cc",
"./source/common/access_log/access_log_formatter.cc",
"./source/extensions/filters/http/squash/squash_filter.h",
"./source/extensions/filters/http/squash/squash_filter.cc",
"./source/server/http/admin.h", "./source/server/http/admin.cc",
"./tools/clang_tools/api_booster/main.cc",
"./tools/clang_tools/api_booster/proto_cxx_utils.cc")
# Only one C++ file should instantiate grpc_init
GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc")
CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9")
BUILDIFIER_PATH = paths.getBuildifier()
BUILDOZER_PATH = paths.getBuildozer()
ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
"envoy_build_fixer.py")
HEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py")
SUBDIR_SET = set(common.includeDirOrder())
INCLUDE_ANGLE = "#include <"
INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE)
PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE)
X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*')
# yapf: disable
PROTOBUF_TYPE_ERRORS = {
# Well-known types should be referenced from the ProtobufWkt namespace.
"Protobuf::Any": "ProtobufWkt::Any",
"Protobuf::Empty": "ProtobufWkt::Empty",
"Protobuf::ListValue": "ProtobufWkt::ListValue",
"Protobuf::NULL_VALUE": "ProtobufWkt::NULL_VALUE",
"Protobuf::StringValue": "ProtobufWkt::StringValue",
"Protobuf::Struct": "ProtobufWkt::Struct",
"Protobuf::Value": "ProtobufWkt::Value",
# Other common mis-namespacing of protobuf types.
"ProtobufWkt::Map": "Protobuf::Map",
"ProtobufWkt::MapPair": "Protobuf::MapPair",
"ProtobufUtil::MessageDifferencer": "Protobuf::util::MessageDifferencer"
}
LIBCXX_REPLACEMENTS = {
"absl::make_unique<": "std::make_unique<",
}
UNOWNED_EXTENSIONS = {
"extensions/filters/http/ratelimit",
"extensions/filters/http/buffer",
"extensions/filters/http/rbac",
"extensions/filters/http/ip_tagging",
"extensions/filters/http/tap",
"extensions/filters/http/health_check",
"extensions/filters/http/cors",
"extensions/filters/http/ext_authz",
"extensions/filters/http/dynamo",
"extensions/filters/http/lua",
"extensions/filters/http/common",
"extensions/filters/common",
"extensions/filters/common/ratelimit",
"extensions/filters/common/rbac",
"extensions/filters/common/lua",
"extensions/filters/listener/original_dst",
"extensions/filters/listener/proxy_protocol",
"extensions/stat_sinks/statsd",
"extensions/stat_sinks/common",
"extensions/stat_sinks/common/statsd",
"extensions/health_checkers/redis",
"extensions/access_loggers/grpc",
"extensions/access_loggers/file",
"extensions/common/tap",
"extensions/transport_sockets/raw_buffer",
"extensions/transport_sockets/tap",
"extensions/tracers/zipkin",
"extensions/tracers/dynamic_ot",
"extensions/tracers/opencensus",
"extensions/tracers/lightstep",
"extensions/tracers/common",
"extensions/tracers/common/ot",
"extensions/retry/host/previous_hosts",
"extensions/filters/network/ratelimit",
"extensions/filters/network/client_ssl_auth",
"extensions/filters/network/rbac",
"extensions/filters/network/tcp_proxy",
"extensions/filters/network/echo",
"extensions/filters/network/ext_authz",
"extensions/filters/network/redis_proxy",
"extensions/filters/network/kafka",
"extensions/filters/network/kafka/protocol",
"extensions/filters/network/kafka/serialization",
"extensions/filters/network/mongo_proxy",
"extensions/filters/network/common",
"extensions/filters/network/common/redis",
}
# yapf: enable
# Map a line transformation function across each line of a file, rewriting it in place
# (the current implementation writes the file directly, without .bak temporaries).
def replaceLines(path, line_xform):
# We used to use fileinput in the older Python 2.7 script, but this doesn't do
# inplace mode and UTF-8 in Python 3, so doing it the manual way.
output_lines = [line_xform(line) for line in readLines(path)]
pathlib.Path(path).write_text('\n'.join(output_lines), encoding='utf-8')
# Obtain all the lines in a given file.
def readLines(path):
return readFile(path).split('\n')
# Read a UTF-8 encoded file as a str.
def readFile(path):
return pathlib.Path(path).read_text(encoding='utf-8')
# lookPath searches for the given executable in all directories in PATH
# environment variable. If it cannot be found, empty string is returned.
def lookPath(executable):
for path_dir in os.environ["PATH"].split(os.pathsep):
executable_path = os.path.join(path_dir, executable)
if os.path.exists(executable_path):
return executable_path
return ""
# pathExists checks whether the given path exists. This function assumes that
# the path is absolute and evaluates environment variables.
def pathExists(executable):
return os.path.exists(os.path.expandvars(executable))
# executableByOthers checks whether the given path has execute permission for
# others.
def executableByOthers(executable):
st = os.stat(os.path.expandvars(executable))
return bool(st.st_mode & stat.S_IXOTH)
# Check whether all needed external tools (clang-format, buildifier, buildozer) are
# available.
def checkTools():
error_messages = []
clang_format_abs_path = lookPath(CLANG_FORMAT_PATH)
if clang_format_abs_path:
if not executableByOthers(clang_format_abs_path):
error_messages.append("command {} exists, but cannot be executed by other "
"users".format(CLANG_FORMAT_PATH))
else:
error_messages.append(
"Command {} not found. If you have clang-format in version 8.x.x "
"installed, but the binary name is different or it's not available in "
"PATH, please use CLANG_FORMAT environment variable to specify the path. "
"Examples:\n"
" export CLANG_FORMAT=clang-format-9.0.0\n"
" export CLANG_FORMAT=/opt/bin/clang-format-9\n"
" export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH))
def checkBazelTool(name, path, var):
bazel_tool_abs_path = lookPath(path)
if bazel_tool_abs_path:
if not executableByOthers(bazel_tool_abs_path):
error_messages.append("command {} exists, but cannot be executed by other "
"users".format(path))
elif pathExists(path):
if not executableByOthers(path):
error_messages.append("command {} exists, but cannot be executed by other "
"users".format(path))
else:
error_messages.append(
"Command {} not found. If you have buildifier installed, but the binary "
"name is different or it's not available in $GOPATH/bin, please use "
"{} environment variable to specify the path. Example:\n"
" export {}=/opt/bin/buildifier\n"
"If you don't have buildifier installed, you can install it by:\n"
" go get -u github.com/bazelbuild/buildtools/{}".format(path, var, var, name))
checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN')
checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN')
return error_messages
def checkNamespace(file_path):
for excluded_path in namespace_check_excluded_paths:
if file_path.startswith(excluded_path):
return []
nolint = "NOLINT(namespace-%s)" % namespace_check.lower()
text = readFile(file_path)
if not re.search("^\s*namespace\s+%s\s*{" % namespace_check, text, re.MULTILINE) and \
not nolint in text:
return ["Unable to find %s namespace or %s for file: %s" % (namespace_check, nolint, file_path)]
return []
def packageNameForProto(file_path):
package_name = None
error_message = []
result = PROTO_PACKAGE_REGEX.search(readFile(file_path))
if result is not None and len(result.groups()) == 1:
package_name = result.group(1)
if package_name is None:
error_message = ["Unable to find package name for proto file: %s" % file_path]
return [package_name, error_message]
# To avoid breaking the Lyft import, we just check for path inclusion here.
def whitelistedForProtobufDeps(file_path):
return (file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL) or \
any(path_segment in file_path for path_segment in GOOGLE_PROTOBUF_WHITELIST))
# Real-world time sources should not be instantiated in the source, except for a few
# specific cases. They should be passed down from where they are instantiated to where
# they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager.
def whitelistedForRealTime(file_path):
if file_path.endswith(".md"):
return True
return file_path in REAL_TIME_WHITELIST
def whitelistedForSerializeAsString(file_path):
return file_path in SERIALIZE_AS_STRING_WHITELIST
def whitelistedForJsonStringToMessage(file_path):
return file_path in JSON_STRING_TO_MESSAGE_WHITELIST
def whitelistedForHistogramSiSuffix(name):
return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST
def whitelistedForStdRegex(file_path):
return file_path.startswith("./test") or file_path in STD_REGEX_WHITELIST or file_path.endswith(
DOCS_SUFFIX)
def whitelistedForGrpcInit(file_path):
return file_path in GRPC_INIT_WHITELIST
def whitelistedForUnpackTo(file_path):
return file_path.startswith("./test") or file_path in [
"./source/common/protobuf/utility.cc", "./source/common/protobuf/utility.h"
]
def findSubstringAndReturnError(pattern, file_path, error_message):
text = readFile(file_path)
if pattern in text:
error_messages = [file_path + ": " + error_message]
for i, line in enumerate(text.splitlines()):
if pattern in line:
error_messages.append(" %s:%s" % (file_path, i + 1))
return error_messages
return []
def errorIfNoSubstringFound(pattern, file_path, error_message):
return [] if pattern in readFile(file_path) else [file_path + ": " + error_message]
def isApiFile(file_path):
return file_path.startswith(args.api_prefix) or file_path.startswith(args.api_shadow_prefix)
def isBuildFile(file_path):
basename = os.path.basename(file_path)
if basename in {"BUILD", "BUILD.bazel"} or basename.endswith(".BUILD"):
return True
return False
def isExternalBuildFile(file_path):
return isBuildFile(file_path) and (file_path.startswith("./bazel/external/") or
file_path.startswith("./tools/clang_tools"))
def isSkylarkFile(file_path):
return file_path.endswith(".bzl")
def isWorkspaceFile(file_path):
return os.path.basename(file_path) == "WORKSPACE"
def isBuildFixerExcludedFile(file_path):
for excluded_path in build_fixer_check_excluded_paths:
if file_path.startswith(excluded_path):
return True
return False
def hasInvalidAngleBracketDirectory(line):
if not line.startswith(INCLUDE_ANGLE):
return False
path = line[INCLUDE_ANGLE_LEN:]
slash = path.find("/")
if slash == -1:
return False
subdir = path[0:slash]
return subdir in SUBDIR_SET
VERSION_HISTORY_NEW_LINE_REGEX = re.compile("\* [a-z \-_]*: [a-z:`]")
VERSION_HISTORY_NEW_RELEASE_REGEX = re.compile("^====[=]+$")
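# Illustrative examples (made up, not from the real version history): a line such as
#   "* http: fixed a bug where ..."
# matches VERSION_HISTORY_NEW_LINE_REGEX, whereas "* HTTP: Fixed ..." is reported as
# malformed, since the area prefix and the first character after the colon are expected
# to be lower case (or a backtick/colon).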
def checkCurrentReleaseNotes(file_path, error_messages):
in_current_release = False
for line_number, line in enumerate(readLines(file_path)):
def reportError(message):
error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message))
if VERSION_HISTORY_NEW_RELEASE_REGEX.match(line):
# If we were in the section for the current release this means we have passed it.
if in_current_release:
break
# If we see a version marker we are now in the section for the current release.
in_current_release = True
if line.startswith("*") and not VERSION_HISTORY_NEW_LINE_REGEX.match(line):
reportError("Version history line malformed. "
"Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line)
def checkFileContents(file_path, checker):
error_messages = []
if file_path.endswith("version_history.rst"):
# Version file checking has enough special cased logic to merit its own checks.
# This only validates entries for the current release as very old release
# notes have a different format.
checkCurrentReleaseNotes(file_path, error_messages)
for line_number, line in enumerate(readLines(file_path)):
def reportError(message):
error_messages.append("%s:%d: %s" % (file_path, line_number + 1, message))
checker(line, file_path, reportError)
return error_messages
DOT_MULTI_SPACE_REGEX = re.compile("\\. +")
def fixSourceLine(line):
# Strip double space after '.' This may prove overenthusiastic and need to
# be restricted to comments and metadata files but works for now.
line = re.sub(DOT_MULTI_SPACE_REGEX, ". ", line)
if hasInvalidAngleBracketDirectory(line):
line = line.replace("<", '"').replace(">", '"')
# Fix incorrect protobuf namespace references.
for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
line = line.replace(invalid_construct, valid_construct)
# Use recommended cpp stdlib
for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
line = line.replace(invalid_construct, valid_construct)
return line
# We want to look for a call to condvar.waitFor, but there's no strong pattern
# to the variable name of the condvar. If we just look for ".waitFor" we'll also
# pick up time_system_.waitFor(...), and we don't want to return true for that
# pattern. But in that case there is a strong pattern of using time_system in
# various spellings as the variable name.
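# Illustrative (made-up) examples for this check:
#   "  cv_.waitFor(mutex_, timeout);"           -> flagged: a real condvar wait
#   "  time_system_.waitFor(mutex_, timeout);"  -> not flagged: injected/simulated time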
def hasCondVarWaitFor(line):
wait_for = line.find(".waitFor(")
if wait_for == -1:
return False
preceding = line[0:wait_for]
if preceding.endswith("time_system") or preceding.endswith("timeSystem()") or \
preceding.endswith("time_system_"):
return False
return True
# Determines whether the filename is either in the specified subdirectory, or
# at the top level. We consider files in the top level for the benefit of
# the check_format testcases in tools/testdata/check_format.
def isInSubdir(filename, *subdirs):
# Skip this check for check_format's unit-tests.
if filename.count("/") <= 1:
return True
for subdir in subdirs:
if filename.startswith('./' + subdir + '/'):
return True
return False
def checkSourceLine(line, file_path, reportError):
# Check fixable errors. These may have been fixed already.
if line.find(". ") != -1:
reportError("over-enthusiastic spaces")
if isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line):
reportError(
"Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue.")
if hasInvalidAngleBracketDirectory(line):
reportError("envoy includes should not have angle brackets")
for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
if invalid_construct in line:
reportError("incorrect protobuf type reference %s; "
"should be %s" % (invalid_construct, valid_construct))
for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
if invalid_construct in line:
reportError("term %s should be replaced with standard library term %s" %
(invalid_construct, valid_construct))
# Do not include the virtual_includes headers.
if re.search("#include.*/_virtual_includes/", line):
reportError("Don't include the virtual includes headers.")
# Some errors cannot be fixed automatically, and actionable, consistent,
# navigable messages should be emitted to make it easy to find and fix
# the errors by hand.
if not whitelistedForProtobufDeps(file_path):
if '"google/protobuf' in line or "google::protobuf" in line:
reportError("unexpected direct dependency on google.protobuf, use "
"the definitions in common/protobuf/protobuf.h instead.")
if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"):
# We don't check here for std::mutex because that may legitimately show up in
# comments, for example this one.
reportError("Don't use <mutex> or <condition_variable*>, switch to "
"Thread::MutexBasicLockable in source/common/common/thread.h")
if line.startswith("#include <shared_mutex>"):
# We don't check here for std::shared_timed_mutex because that may
# legitimately show up in comments, for example this one.
reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.")
if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line:
if "RealTimeSource" in line or \
("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \
"std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \
"std::this_thread::sleep_for" in line or hasCondVarWaitFor(line):
reportError("Don't reference real-world time sources from production code; use injection")
if not whitelistedForUnpackTo(file_path):
if "UnpackTo" in line:
reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
# Check that we use the absl::Time library
if "std::get_time" in line:
if "test/" in file_path:
reportError("Don't use std::get_time; use TestUtility::parseTime in tests")
else:
reportError("Don't use std::get_time; use the injectable time system")
if "std::put_time" in line:
reportError("Don't use std::put_time; use absl::Time equivalent instead")
if "gmtime" in line:
reportError("Don't use gmtime; use absl::Time equivalent instead")
if "mktime" in line:
reportError("Don't use mktime; use absl::Time equivalent instead")
if "localtime" in line:
reportError("Don't use localtime; use absl::Time equivalent instead")
if "strftime" in line:
reportError("Don't use strftime; use absl::FormatTime instead")
if "strptime" in line:
reportError("Don't use strptime; use absl::FormatTime instead")
if "std::atomic_" in line:
# The std::atomic_* free functions are functionally equivalent to calling
# operations on std::atomic<T> objects, so prefer to use that instead.
reportError("Don't use free std::atomic_* functions, use std::atomic<T> members instead.")
if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h":
# __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that
# can be used instead
reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined "
"in include/envoy/common/platform.h instead")
if re.search("\{\s*\.\w+\s*\=", line):
# Designated initializers are not part of the C++14 standard and are not supported
# by MSVC
reportError("Don't use designated initializers in struct initialization, "
"they are not part of C++14")
if " ?: " in line:
# The ?: operator is non-standard, it is a GCC extension
reportError("Don't use the '?:' operator, it is a non-standard GCC extension")
if line.startswith("using testing::Test;"):
reportError("Don't use 'using testing::Test;, elaborate the type instead")
if line.startswith("using testing::TestWithParams;"):
reportError("Don't use 'using testing::Test;, elaborate the type instead")
if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line:
# The MessageLite::SerializeAsString doesn't generate deterministic serialization,
# use MessageUtil::hash instead.
reportError(
"Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead."
)
if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line:
# Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing
# behavior.
reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.")
if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \
('.counter(' in line or '.gauge(' in line or '.histogram(' in line):
reportError("Don't lookup stats by name at runtime; use StatName saved during construction")
if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line):
reportError("Don't use mangled Protobuf names for enum constants")
  hist_m = re.search(r"(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line)
if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)):
reportError(
"Don't suffix histogram names with the unit symbol, "
"it's already part of the histogram object and unit-supporting sinks can use this information natively, "
"other sinks can add the suffix automatically on flush should they prefer to do so.")
if not whitelistedForStdRegex(file_path) and "std::regex" in line:
reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher")
if not whitelistedForGrpcInit(file_path):
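    # Find the earliest occurrence of grpc_init() or grpc_shutdown() on the line and
    # report it unless it is preceded by a "// " comment marker.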
grpc_init_or_shutdown = line.find("grpc_init()")
grpc_shutdown = line.find("grpc_shutdown()")
if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and
grpc_shutdown < grpc_init_or_shutdown):
grpc_init_or_shutdown = grpc_shutdown
if grpc_init_or_shutdown != -1:
comment = line.find("// ")
if comment == -1 or comment > grpc_init_or_shutdown:
reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " +
"Grpc::GoogleGrpcContext. See #8282")
def checkBuildLine(line, file_path, reportError):
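  """Checks a single line of a BUILD/WORKSPACE file, reporting issues via reportError."""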
if "@bazel_tools" in line and not (isSkylarkFile(file_path) or file_path.startswith("./bazel/")):
reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel")
if not whitelistedForProtobufDeps(file_path) and '"protobuf"' in line:
reportError("unexpected direct external dependency on protobuf, use "
"//source/common/protobuf instead.")
if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and
not isExternalBuildFile(file_path) and "@envoy//" in line):
reportError("Superfluous '@envoy//' prefix")
def fixBuildLine(file_path, line):
if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and
not isExternalBuildFile(file_path)):
line = line.replace("@envoy//", "//")
return line
def fixBuildPath(file_path):
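  """Rewrites a BUILD file in place (line fixes, plus envoy_build_fixer and buildifier
  where applicable) and returns error messages for any rewrite that failed."""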
replaceLines(file_path, functools.partial(fixBuildLine, file_path))
error_messages = []
# TODO(htuch): Add API specific BUILD fixer script.
if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile(
file_path) and not isWorkspaceFile(file_path):
if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0:
error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path]
if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0:
error_messages += ["buildifier rewrite failed for file: %s" % file_path]
return error_messages
def checkBuildPath(file_path):
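  """Checks a BUILD file with envoy_build_fixer, buildifier and the line-level checks
  (where applicable) and returns a list of error messages."""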
error_messages = []
if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile(
file_path) and not isWorkspaceFile(file_path):
command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)
error_messages += executeCommand(command, "envoy_build_fixer check failed", file_path)
if isBuildFile(file_path) and (file_path.startswith(args.api_prefix + "envoy") or
file_path.startswith(args.api_shadow_prefix + "envoy")):
found = False
for line in readLines(file_path):
if "api_proto_package(" in line:
found = True
break
if not found:
error_messages += ["API build file does not provide api_proto_package()"]
command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path)
error_messages += executeCommand(command, "buildifier check failed", file_path)
error_messages += checkFileContents(file_path, checkBuildLine)
return error_messages
def fixSourcePath(file_path):
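  """Rewrites a source file in place (line fixes, header ordering and clang-format where
  applicable) and returns a list of error messages."""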
replaceLines(file_path, fixSourceLine)
error_messages = []
if not file_path.endswith(DOCS_SUFFIX):
if not file_path.endswith(PROTO_SUFFIX):
error_messages += fixHeaderOrder(file_path)
error_messages += clangFormat(file_path)
if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path):
package_name, error_message = packageNameForProto(file_path)
if package_name is None:
error_messages += error_message
return error_messages
def checkSourcePath(file_path):
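  """Runs the line-level, namespace, header order and clang-format checks (where
  applicable) on a source file and returns a list of error messages."""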
error_messages = checkFileContents(file_path, checkSourceLine)
if not file_path.endswith(DOCS_SUFFIX):
if not file_path.endswith(PROTO_SUFFIX):
error_messages += checkNamespace(file_path)
command = ("%s --include_dir_order %s --path %s | diff %s -" %
(HEADER_ORDER_PATH, include_dir_order, file_path, file_path))
error_messages += executeCommand(command, "header_order.py check failed", file_path)
command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path))
error_messages += executeCommand(command, "clang-format check failed", file_path)
if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path):
package_name, error_message = packageNameForProto(file_path)
if package_name is None:
error_messages += error_message
return error_messages
# Example target outputs are:
# - "26,27c26"
# - "12,13d13"
# - "7a8,9"
def executeCommand(command,
error_message,
file_path,
regex=re.compile(r"^(\d+)[a|c|d]?\d*(?:,\d+[a|c|d]?\d*)?$")):
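  """Runs a shell command and converts failures into error messages.

  Non-empty output from a successful command is returned as a list of lines. When the
  command fails with diff-style output, the default regex extracts the leading line
  number from each hunk header (e.g. "26" from "26,27c26") so errors can be reported
  as file:line entries.
  """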
try:
output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip()
if output:
return output.decode('utf-8').split("\n")
return []
except subprocess.CalledProcessError as e:
if (e.returncode != 0 and e.returncode != 1):
return ["ERROR: something went wrong while executing: %s" % e.cmd]
# In case we can't find any line numbers, record an error message first.
error_messages = ["%s for file: %s" % (error_message, file_path)]
for line in e.output.decode('utf-8').splitlines():
for num in regex.findall(line):
error_messages.append(" %s:%s" % (file_path, num))
return error_messages
def fixHeaderOrder(file_path):
command = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, include_dir_order,
file_path)
if os.system(command) != 0:
return ["header_order.py rewrite error: %s" % (file_path)]
return []
def clangFormat(file_path):
command = "%s -i %s" % (CLANG_FORMAT_PATH, file_path)
if os.system(command) != 0:
return ["clang-format rewrite error: %s" % (file_path)]
return []
def checkFormat(file_path):
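  """Checks (and, in fix mode, fixes) a single file, dispatching to the BUILD or source
  handlers; returns any error messages preceded by a "From <file_path>" header."""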
if file_path.startswith(EXCLUDED_PREFIXES):
return []
if not file_path.endswith(SUFFIXES):
return []
error_messages = []
# Apply fixes first, if asked, and then run checks. If we wind up attempting to fix
# an issue, but there's still an error, that's a problem.
try_to_fix = operation_type == "fix"
if isBuildFile(file_path) or isSkylarkFile(file_path) or isWorkspaceFile(file_path):
if try_to_fix:
error_messages += fixBuildPath(file_path)
error_messages += checkBuildPath(file_path)
else:
if try_to_fix:
error_messages += fixSourcePath(file_path)
error_messages += checkSourcePath(file_path)
if error_messages:
return ["From %s" % file_path] + error_messages
return error_messages
def checkFormatReturnTraceOnError(file_path):
"""Run checkFormat and return the traceback of any exception."""
try:
return checkFormat(file_path)
except:
return traceback.format_exc().split("\n")
def checkOwners(dir_name, owned_directories, error_messages):
"""Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS
Args:
dir_name: the directory being checked.
owned_directories: directories currently listed in CODEOWNERS.
error_messages: where to put an error message for new unowned directories.
"""
found = False
for owned in owned_directories:
if owned.startswith(dir_name) or dir_name.startswith(owned):
found = True
if not found and dir_name not in UNOWNED_EXTENSIONS:
error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name)
def checkFormatVisitor(arg, dir_name, names):
"""Run checkFormat in parallel for the given files.
Args:
arg: a tuple (pool, result_list, owned_directories, error_messages)
pool and result_list are for starting tasks asynchronously.
owned_directories tracks directories listed in the CODEOWNERS file.
error_messages is a list of string format errors.
dir_name: the parent directory of the given files.
names: a list of file names.
"""
# Unpack the multiprocessing.Pool process pool and list of results. Since
# python lists are passed as references, this is used to collect the list of
# async results (futures) from running checkFormat and passing them back to
# the caller.
  pool, result_list, owned_directories, error_messages = arg
# Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded
# manner as it is a small and limited list.
source_prefix = './source/'
full_prefix = './source/extensions/'
# Check to see if this directory is a subdir under /source/extensions
# Also ignore top level directories under /source/extensions since we don't
# need owners for source/extensions/access_loggers etc, just the subdirectories.
if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]:
checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages)
for file_name in names:
result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,))
result_list.append(result)
# checkErrorMessages iterates over the list of error messages, prints each one,
# and returns a bool indicating whether there were any errors.
def checkErrorMessages(error_messages):
if error_messages:
for e in error_messages:
print("ERROR: %s" % e)
return True
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check or fix file format.")
parser.add_argument("operation_type",
type=str,
choices=["check", "fix"],
help="specify if the run should 'check' or 'fix' format.")
parser.add_argument(
"target_path",
type=str,
nargs="?",
default=".",
help="specify the root directory for the script to recurse over. Default '.'.")
parser.add_argument("--add-excluded-prefixes",
type=str,
nargs="+",
help="exclude additional prefixes.")
parser.add_argument("-j",
"--num-workers",
type=int,
default=multiprocessing.cpu_count(),
help="number of worker processes to use; defaults to one per core.")
parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.")
parser.add_argument("--api-shadow-prefix",
type=str,
default="./generated_api_shadow/",
help="path of the shadow API tree.")
parser.add_argument("--skip_envoy_build_rule_check",
action="store_true",
help="skip checking for '@envoy//' prefix in build rules.")
parser.add_argument("--namespace_check",
type=str,
nargs="?",
default="Envoy",
help="specify namespace check string. Default 'Envoy'.")
parser.add_argument("--namespace_check_excluded_paths",
type=str,
nargs="+",
default=[],
help="exclude paths from the namespace_check.")
parser.add_argument("--build_fixer_check_excluded_paths",
type=str,
nargs="+",
default=[],
help="exclude paths from envoy_build_fixer check.")
parser.add_argument("--include_dir_order",
type=str,
default=",".join(common.includeDirOrder()),
help="specify the header block include directory order.")
args = parser.parse_args()
operation_type = args.operation_type
target_path = args.target_path
envoy_build_rule_check = not args.skip_envoy_build_rule_check
namespace_check = args.namespace_check
namespace_check_excluded_paths = args.namespace_check_excluded_paths + [
"./tools/api_boost/testdata/",
"./tools/clang_tools/",
]
build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [
"./bazel/external/",
"./bazel/toolchains/",
"./bazel/BUILD",
"./tools/clang_tools",
]
include_dir_order = args.include_dir_order
if args.add_excluded_prefixes:
EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes)
# Check whether all needed external tools are available.
ct_error_messages = checkTools()
if checkErrorMessages(ct_error_messages):
sys.exit(1)
# Returns the list of directories with owners listed in CODEOWNERS. May append errors to
# error_messages.
def ownedDirectories(error_messages):
owned = []
maintainers = [
'@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@junr03',
'@dnoe', '@dio', '@jmarantz'
]
try:
with open('./CODEOWNERS') as f:
for line in f:
# If this line is of the form "extensions/... @owner1 @owner2" capture the directory
# name and store it in the list of directories with documented owners.
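        # Illustrative example of such a line (not from the real CODEOWNERS file):
        #   /source/extensions/filters/http/buffer @alice @bob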
m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line)
if m is not None and not line.startswith('#'):
owned.append(m.group(1).strip())
          owners = re.findall(r'@\S+', m.group(2).strip())
if len(owners) < 2:
error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n"
" {}".format(line))
maintainer = len(set(owners).intersection(set(maintainers))) > 0
if not maintainer:
error_messages.append("Extensions require at least one maintainer OWNER:\n"
" {}".format(line))
return owned
except IOError:
return [] # for the check format tests.
# Calculate the list of owned directories once per run.
error_messages = []
owned_directories = ownedDirectories(error_messages)
if os.path.isfile(target_path):
error_messages += checkFormat("./" + target_path)
else:
pool = multiprocessing.Pool(processes=args.num_workers)
results = []
# For each file in target_path, start a new task in the pool and collect the
# results (results is passed by reference, and is used as an output).
for root, _, files in os.walk(target_path):
checkFormatVisitor((pool, results, owned_directories, error_messages), root, files)
# Close the pool to new tasks, wait for all of the running tasks to finish,
# then collect the error messages.
pool.close()
pool.join()
error_messages += sum((r.get() for r in results), [])
if checkErrorMessages(error_messages):
print("ERROR: check format failed. run 'tools/check_format.py fix'")
sys.exit(1)
if operation_type == "check":
print("PASS")
| 41.82471 | 125 | 0.691805 | [
"Apache-2.0"
] | isholaomotayo/envoy | tools/check_format.py | 39,608 | Python |